Dec 06 05:19:36 crc systemd[1]: Starting Kubernetes Kubelet...
Dec 06 05:19:36 crc restorecon[4705]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Dec 06 05:19:36 crc restorecon[4705]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:36 crc restorecon[4705]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 06 05:19:36 crc 
restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Dec 06 05:19:36 crc restorecon[4705]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 06 05:19:36 crc restorecon[4705]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Dec 06 05:19:36 crc 
restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:36 crc restorecon[4705]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:36 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Dec 06 05:19:37 crc restorecon[4705]: 
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 
05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Dec 06 05:19:37 crc restorecon[4705]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 06 05:19:37 crc restorecon[4705]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Dec 06 05:19:37 crc restorecon[4705]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Dec 06 05:19:37 crc kubenswrapper[4706]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 06 05:19:37 crc kubenswrapper[4706]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Dec 06 05:19:37 crc kubenswrapper[4706]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 06 05:19:37 crc kubenswrapper[4706]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 06 05:19:37 crc kubenswrapper[4706]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Dec 06 05:19:37 crc kubenswrapper[4706]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.833184 4706 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836748 4706 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836772 4706 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836790 4706 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836797 4706 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836802 4706 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836809 4706 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836816 4706 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836823 4706 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836828 4706 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836834 4706 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836838 4706 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836844 4706 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836848 4706 feature_gate.go:330] unrecognized feature gate: SignatureStores
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836853 4706 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836858 4706 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836863 4706 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836868 4706 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836873 4706 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836878 4706 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836883 4706 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836887 4706 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836893 4706 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836898 4706 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836902 4706 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836907 4706 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836912 4706 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836917 4706 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836925 4706 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836931 4706 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836937 4706 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836943 4706 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836949 4706 feature_gate.go:330] unrecognized feature gate: PinnedImages
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836954 4706 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836959 4706 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836964 4706 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836969 4706 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836974 4706 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836980 4706 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836985 4706 feature_gate.go:330] unrecognized feature gate: NewOLM
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836991 4706 feature_gate.go:330] unrecognized feature gate: Example
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.836997 4706 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837001 4706 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837006 4706 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837011 4706 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837018 4706 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837024 4706 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837030 4706 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837035 4706 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837040 4706 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837066 4706 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837072 4706 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837077 4706 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837082 4706 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837087 4706 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837091 4706 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837096 4706 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837101 4706 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837106 4706 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837111 4706 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837115 4706 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837121 4706 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837126 4706 feature_gate.go:330] unrecognized feature gate: OVNObservability
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837131 4706 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837137 4706 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837142 4706 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837148 4706 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837155 4706 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837161 4706 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837169 4706 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837176 4706 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.837183 4706 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837290 4706 flags.go:64] FLAG: --address="0.0.0.0"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837302 4706 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837313 4706 flags.go:64] FLAG: --anonymous-auth="true"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837321 4706 flags.go:64] FLAG: --application-metrics-count-limit="100"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837328 4706 flags.go:64] FLAG: --authentication-token-webhook="false"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837334 4706 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837342 4706 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837350 4706 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837356 4706 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837362 4706 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837368 4706 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837374 4706 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837380 4706 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837387 4706 flags.go:64] FLAG: --cgroup-root=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837392 4706 flags.go:64] FLAG: --cgroups-per-qos="true"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837398 4706 flags.go:64] FLAG: --client-ca-file=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837404 4706 flags.go:64] FLAG: --cloud-config=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837409 4706 flags.go:64] FLAG: --cloud-provider=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837415 4706 flags.go:64] FLAG: --cluster-dns="[]"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837422 4706 flags.go:64] FLAG: --cluster-domain=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837429 4706 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837435 4706 flags.go:64] FLAG: --config-dir=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837441 4706 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837447 4706 flags.go:64] FLAG: --container-log-max-files="5"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837455 4706 flags.go:64] FLAG: --container-log-max-size="10Mi"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837461 4706 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837467 4706 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837482 4706 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837488 4706 flags.go:64] FLAG: --contention-profiling="false"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837494 4706 flags.go:64] FLAG: --cpu-cfs-quota="true"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837499 4706 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837505 4706 flags.go:64] FLAG: --cpu-manager-policy="none"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837511 4706 flags.go:64] FLAG: --cpu-manager-policy-options=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837517 4706 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837523 4706 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837529 4706 flags.go:64] FLAG: --enable-debugging-handlers="true"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837534 4706 flags.go:64] FLAG: --enable-load-reader="false"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837540 4706 flags.go:64] FLAG: --enable-server="true"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837545 4706 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837558 4706 flags.go:64] FLAG: --event-burst="100"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837564 4706 flags.go:64] FLAG: --event-qps="50"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837569 4706 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837575 4706 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837580 4706 flags.go:64] FLAG: --eviction-hard=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837588 4706 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837593 4706 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837599 4706 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837605 4706 flags.go:64] FLAG: --eviction-soft=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837611 4706 flags.go:64] FLAG: --eviction-soft-grace-period=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837616 4706 flags.go:64] FLAG: --exit-on-lock-contention="false"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837622 4706 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837628 4706 flags.go:64] FLAG: --experimental-mounter-path=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837634 4706 flags.go:64] FLAG: --fail-cgroupv1="false"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837639 4706 flags.go:64] FLAG: --fail-swap-on="true"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837645 4706 flags.go:64] FLAG: --feature-gates=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837653 4706 flags.go:64] FLAG: --file-check-frequency="20s"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837660 4706 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837666 4706 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837672 4706 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837680 4706 flags.go:64] FLAG: --healthz-port="10248"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837686 4706 flags.go:64] FLAG: --help="false"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837692 4706 flags.go:64] FLAG: --hostname-override=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837698 4706 flags.go:64] FLAG: --housekeeping-interval="10s"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837704 4706 flags.go:64] FLAG: --http-check-frequency="20s"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837711 4706 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837717 4706 flags.go:64] FLAG: --image-credential-provider-config=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837722 4706 flags.go:64] FLAG: --image-gc-high-threshold="85"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837728 4706 flags.go:64] FLAG: --image-gc-low-threshold="80"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837734 4706 flags.go:64] FLAG: --image-service-endpoint=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837740 4706 flags.go:64] FLAG: --kernel-memcg-notification="false"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837745 4706 flags.go:64] FLAG: --kube-api-burst="100"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837751 4706 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837757 4706 flags.go:64] FLAG: --kube-api-qps="50"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837763 4706 flags.go:64] FLAG: --kube-reserved=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837769 4706 flags.go:64] FLAG: --kube-reserved-cgroup=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837774 4706 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837780 4706 flags.go:64] FLAG: --kubelet-cgroups=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837786 4706 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837792 4706 flags.go:64] FLAG: --lock-file=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837798 4706 flags.go:64] FLAG: --log-cadvisor-usage="false"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837803 4706 flags.go:64] FLAG: --log-flush-frequency="5s"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837809 4706 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837818 4706 flags.go:64] FLAG: --log-json-split-stream="false"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837824 4706 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837830 4706 flags.go:64] FLAG: --log-text-split-stream="false"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837836 4706 flags.go:64] FLAG: --logging-format="text"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837841 4706 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837848 4706 flags.go:64] FLAG: --make-iptables-util-chains="true"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837853 4706 flags.go:64] FLAG: --manifest-url=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837859 4706 flags.go:64] FLAG: --manifest-url-header=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837867 4706 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837876 4706 flags.go:64] FLAG: --max-open-files="1000000"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837883 4706 flags.go:64] FLAG: --max-pods="110"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837889 4706 flags.go:64] FLAG: --maximum-dead-containers="-1"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837895 4706 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837900 4706 flags.go:64] FLAG: --memory-manager-policy="None"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837909 4706 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837915 4706 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837921 4706 flags.go:64] FLAG: --node-ip="192.168.126.11"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837940 4706 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837952 4706 flags.go:64] FLAG: --node-status-max-images="50"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837959 4706 flags.go:64] FLAG: --node-status-update-frequency="10s"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837965 4706 flags.go:64] FLAG: --oom-score-adj="-999"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837971 4706 flags.go:64] FLAG: --pod-cidr=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837976 4706 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837989 4706 flags.go:64] FLAG: --pod-manifest-path=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.837995 4706 flags.go:64] FLAG: --pod-max-pids="-1"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838000 4706 flags.go:64] FLAG: --pods-per-core="0"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838006 4706 flags.go:64] FLAG: --port="10250"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838012 4706 flags.go:64] FLAG: --protect-kernel-defaults="false"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838017 4706 flags.go:64] FLAG: --provider-id=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838023 4706 flags.go:64] FLAG: --qos-reserved=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838029 4706 flags.go:64] FLAG: --read-only-port="10255"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838034 4706 flags.go:64] FLAG: --register-node="true"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838040 4706 flags.go:64] FLAG: --register-schedulable="true"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838065 4706 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838076 4706 flags.go:64] FLAG: --registry-burst="10"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838082 4706 flags.go:64] FLAG: --registry-qps="5"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838087 4706 flags.go:64] FLAG: --reserved-cpus=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838093 4706 flags.go:64] FLAG: --reserved-memory=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838100 4706 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838105 4706 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838111 4706 flags.go:64] FLAG: --rotate-certificates="false"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838120 4706 flags.go:64] FLAG: --rotate-server-certificates="false"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838126 4706 flags.go:64] FLAG: --runonce="false"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838132 4706 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838138 4706 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838145 4706 flags.go:64] FLAG: --seccomp-default="false"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838151 4706 flags.go:64] FLAG: --serialize-image-pulls="true"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838158 4706 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838164 4706 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838170 4706 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838176 4706 flags.go:64] FLAG: --storage-driver-password="root"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838182 4706 flags.go:64] FLAG: --storage-driver-secure="false"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838188 4706 flags.go:64] FLAG: --storage-driver-table="stats"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838275 4706 flags.go:64] FLAG: --storage-driver-user="root"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838491 4706 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838668 4706 flags.go:64] FLAG: --sync-frequency="1m0s"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838736 4706 flags.go:64] FLAG: --system-cgroups=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838747 4706 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838794 4706 flags.go:64] FLAG: --system-reserved-cgroup=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838804 4706 flags.go:64] FLAG: --tls-cert-file=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838814 4706 flags.go:64] FLAG: --tls-cipher-suites="[]"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838851 4706 flags.go:64] FLAG: --tls-min-version=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838857 4706 flags.go:64] FLAG: --tls-private-key-file=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838863 4706 flags.go:64] FLAG: --topology-manager-policy="none"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838869 4706 flags.go:64] FLAG: --topology-manager-policy-options=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838875 4706 flags.go:64] FLAG: --topology-manager-scope="container"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838883 4706 flags.go:64] FLAG: --v="2"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838898 4706 flags.go:64] FLAG: --version="false"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838908 4706 flags.go:64] FLAG: --vmodule=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838922 4706 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.838929 4706 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839472 4706 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839484 4706 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839490 4706 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839506 4706 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839511 4706 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839516 4706 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839520 4706 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839525 4706 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839530 4706 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839534 4706 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839540 4706 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839545 4706 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839551 4706 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839556 4706 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839561 4706 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839565 4706 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839572 4706 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839580 4706 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839585 4706 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839590 4706 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839598 4706 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839603 4706 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839608 4706 feature_gate.go:330] unrecognized feature gate: OVNObservability
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839612 4706 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839617 4706 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839622 4706 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839626 4706 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839631 4706 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839635 4706 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839644 4706 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839648 4706 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839652 4706 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839657 4706 feature_gate.go:330] unrecognized feature gate: NewOLM
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839661 4706 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839666 4706 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839670 4706 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839674 4706 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839679 4706 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839683 4706 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839690 4706 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839696 4706 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839703 4706 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839708 4706 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839712 4706 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839717 4706 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839720 4706 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839725 4706 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839786 4706 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839792 4706 feature_gate.go:330] unrecognized feature gate: PinnedImages
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839799 4706 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839805 4706 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839809 4706 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839813 4706 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839819 4706 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839831 4706 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839837 4706 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839843 4706 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839847 4706 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839852 4706 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839857 4706 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839861 4706 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839866 4706 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839870 4706 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839874 4706 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839878 4706 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839883 4706 feature_gate.go:330] unrecognized feature gate: Example
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839889 4706 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839896 4706 feature_gate.go:330] unrecognized feature gate: SignatureStores
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839901 4706 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839905 4706 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.839909 4706 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.840270 4706 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.853204 4706 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.853253 4706 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853392 4706 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853410 4706 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853420 4706 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853430 4706 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853440 4706 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853450 4706 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853460 4706 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853469 4706 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853479 4706 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853488 4706 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853497 4706 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853506 4706 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853515 4706 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853524 4706 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853533 4706 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853542 4706 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853551 4706 feature_gate.go:330] unrecognized feature gate: Example
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853560 4706 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853571 4706 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853585 4706 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853597 4706 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853608 4706 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853620 4706 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853630 4706 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853640 4706 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853650 4706 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853660 4706 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853670 4706 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853679 4706 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853688 4706 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853697 4706 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853705 4706 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853714 4706 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853724 4706 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853733 4706 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853742 4706 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853751 4706 feature_gate.go:330] unrecognized feature gate: PinnedImages
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853759 4706 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853767 4706 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853776 4706 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853786 4706 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853794 4706 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853803 4706 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853812 4706 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853820 4706 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853829 4706 feature_gate.go:330] unrecognized feature gate: NewOLM
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853837 4706 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853846 4706 feature_gate.go:330] unrecognized feature gate: OVNObservability
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853854 4706 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853863 4706 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853872 4706 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853880 4706 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853889 4706 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853897 4706 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853909 4706 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853921 4706 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853931 4706 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853941 4706 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853951 4706 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853961 4706 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853973 4706 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853983 4706 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.853992 4706 feature_gate.go:330] unrecognized feature gate: SignatureStores
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854000 4706 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854009 4706 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854018 4706 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854026 4706 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854035 4706 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854043 4706 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854081 4706 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854090 4706 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.854104 4706 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854344 4706 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854357 4706 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854367 4706 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854377 4706 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854386 4706 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854395 4706 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854403 4706 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854412 4706 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854420 4706 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854430 4706 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854439 4706 feature_gate.go:330] unrecognized feature gate: NewOLM
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854447 4706 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854456 4706 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854464 4706 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854473 4706 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854482 4706 feature_gate.go:330] unrecognized feature gate: OVNObservability
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854490 4706 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854500 4706 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854542 4706 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854551 4706 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854560 4706 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854569 4706 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854577 4706 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854585 4706 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854595 4706 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854603 4706 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854612 4706 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854620 4706 feature_gate.go:330] unrecognized feature gate: Example
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854628 4706 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854637 4706 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854645 4706 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854654 4706 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854662 4706 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854673 4706 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854682 4706 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854691 4706 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854700 4706 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854708 4706 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854716 4706 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854725 4706 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854734 4706 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854742 4706 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854751 4706 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854759 4706 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854767 4706 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854779 4706 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854791 4706 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854802 4706 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854815 4706 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854824 4706 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854835 4706 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854846 4706 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854856 4706 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854865 4706 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854877 4706 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854887 4706 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854899 4706 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854908 4706 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854920 4706 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854929 4706 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854939 4706 feature_gate.go:330] unrecognized feature gate: PinnedImages
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854949 4706 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854958 4706 feature_gate.go:330] unrecognized feature gate: SignatureStores
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854967 4706 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854976 4706 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854984 4706 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.854994 4706 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.855002 4706 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.855013 4706 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.855024 4706 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.855034 4706 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.855071 4706 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.855310 4706 server.go:940] "Client rotation is on, will bootstrap in background" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.860413 4706 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.860564 4706 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.861492 4706 server.go:997] "Starting client certificate rotation" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.861545 4706 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.862163 4706 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2026-01-03 05:41:39.698700345 +0000 UTC Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.862350 4706 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 672h22m1.836356555s for next certificate rotation Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.869663 4706 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.873043 4706 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.899586 4706 log.go:25] "Validated CRI v1 runtime API" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.922136 4706 log.go:25] "Validated CRI v1 image API" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.924516 4706 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.927946 4706 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-12-06-05-14-36-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.927988 4706 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:44 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.927988 4706 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:44 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:42 fsType:tmpfs blockSize:0}]
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.947072 4706 manager.go:217] Machine: {Timestamp:2025-12-06 05:19:37.94557702 +0000 UTC m=+0.273400984 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:228676c3-f175-4087-a116-e5c2da56f712 BootID:8a86399a-ffbc-43cf-804f-ca5cf554f1d4 Filesystems:[{Device:/run/user/1000 DeviceMajor:0 DeviceMinor:44 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:42 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:6c:b5:da Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:6c:b5:da Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:a1:94:a6 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:ae:f9:11 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:c0:1d:7b Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:d4:d6:7a Speed:-1 Mtu:1496} {Name:eth10 MacAddress:c6:c7:33:52:f1:9d Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:76:5d:d1:ba:f8:52 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.947405 4706 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
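[Annotation] The manager.go:217 Machine entry above describes the virtualized layout cAdvisor sees on this CRC guest: 12 sockets of one single-threaded core each, every core with 32 KiB L1d + 32 KiB L1i and 512 KiB L2, plus a 16 MiB L3 per socket. A quick tally of those logged values (arithmetic only, not cAdvisor code):

package main

import "fmt"

// Sizes in bytes, exactly as they appear in the Machine entry.
func main() {
	const (
		sockets = 12
		l1d     = 32 << 10  // 32768
		l1i     = 32 << 10  // 32768
		l2      = 512 << 10 // 524288
		l3      = 16 << 20  // 16777216, one per socket
	)
	perSocket := l1d + l1i + l2 + l3
	fmt.Printf("cache per socket: %d KiB, total: %d KiB\n", perSocket>>10, (sockets*perSocket)>>10)
	fmt.Printf("memory capacity: %.1f GiB\n", float64(33654128640)/(1<<30)) // MemoryCapacity as logged
}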
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.947581 4706 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.947979 4706 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.948224 4706 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.948278 4706 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.948594 4706 topology_manager.go:138] "Creating topology manager with none policy"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.948612 4706 container_manager_linux.go:303] "Creating device plugin manager"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.948851 4706 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.948899 4706 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.949309 4706 state_mem.go:36] "Initialized new in-memory state store"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.949427 4706 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.950204 4706 kubelet.go:418] "Attempting to sync node with API server"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.950236 4706 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
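[Annotation] The nodeConfig above pins down the node-allocatable computation for this node: with "KubeReserved":null, a SystemReserved memory of 350Mi, and a memory.available hard-eviction threshold of 100Mi, allocatable memory is capacity minus those reservations. A back-of-envelope check against the logged MemoryCapacity (plain arithmetic, not the kubelet's node container manager):

package main

import "fmt"

func main() {
	capacity := int64(33654128640)     // MemoryCapacity from manager.go:217
	systemReserved := int64(350 << 20) // SystemReserved "memory":"350Mi"
	evictionHard := int64(100 << 20)   // HardEvictionThreshold memory.available "100Mi"
	kubeReserved := int64(0)           // "KubeReserved":null

	// Allocatable = Capacity - KubeReserved - SystemReserved - HardEvictionThreshold
	allocatable := capacity - kubeReserved - systemReserved - evictionHard
	fmt.Printf("allocatable memory: %d bytes (~%.2f GiB)\n", allocatable, float64(allocatable)/(1<<30))
}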
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.950272 4706 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.950291 4706 kubelet.go:324] "Adding apiserver pod source"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.950306 4706 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.952739 4706 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.953335 4706 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.954514 4706 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.954804 4706 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.954942 4706 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused
Dec 06 05:19:37 crc kubenswrapper[4706]: E1206 05:19:37.954996 4706 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError"
Dec 06 05:19:37 crc kubenswrapper[4706]: E1206 05:19:37.955039 4706 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.955387 4706 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.955430 4706 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.955446 4706 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.955461 4706 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.955486 4706 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.955502 4706 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.955516 4706 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
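[Annotation] The reflector warnings above are the normal startup race: the kubelet comes up before api-int.crc.testing:6443 is accepting connections, and client-go's informers simply retry their LIST calls until the API server answers. A minimal stand-alone probe with the same shape (hypothetical helper using only the standard library, not client-go's reflector):

package main

import (
	"fmt"
	"net"
	"time"
)

// waitForAPIServer retries a TCP dial until the endpoint answers or the timeout expires.
func waitForAPIServer(addr string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
		if err == nil {
			conn.Close()
			return nil
		}
		fmt.Printf("W: %v, retrying\n", err) // e.g. "connect: connection refused"
		time.Sleep(time.Second)
	}
	return fmt.Errorf("apiserver %s not reachable within %s", addr, timeout)
}

func main() {
	if err := waitForAPIServer("api-int.crc.testing:6443", 30*time.Second); err != nil {
		fmt.Println(err)
	}
}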
pluginName="kubernetes.io/downward-api" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.955557 4706 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.955572 4706 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.955592 4706 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.955607 4706 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.955875 4706 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.956660 4706 server.go:1280] "Started kubelet" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.956765 4706 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.956852 4706 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.957646 4706 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Dec 06 05:19:37 crc systemd[1]: Started Kubernetes Kubelet. Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.964832 4706 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.964909 4706 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Dec 06 05:19:37 crc kubenswrapper[4706]: E1206 05:19:37.964655 4706 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.23:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187e88ac633e4df7 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-06 05:19:37.956593143 +0000 UTC m=+0.284417117,LastTimestamp:2025-12-06 05:19:37.956593143 +0000 UTC m=+0.284417117,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.965424 4706 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.965606 4706 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 08:09:47.027261066 +0000 UTC Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.965809 4706 volume_manager.go:287] "The desired_state_of_world populator starts" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.965846 4706 volume_manager.go:289] "Starting Kubelet Volume Manager" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.965959 4706 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Dec 06 05:19:37 crc kubenswrapper[4706]: E1206 
Dec 06 05:19:37 crc kubenswrapper[4706]: E1206 05:19:37.966124 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Dec 06 05:19:37 crc kubenswrapper[4706]: W1206 05:19:37.966644 4706 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused
Dec 06 05:19:37 crc kubenswrapper[4706]: E1206 05:19:37.966736 4706 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError"
Dec 06 05:19:37 crc kubenswrapper[4706]: E1206 05:19:37.966815 4706 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="200ms"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.969692 4706 server.go:460] "Adding debug handlers to kubelet server"
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.972306 4706 factory.go:55] Registering systemd factory
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.972358 4706 factory.go:221] Registration of the systemd container factory successfully
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.972837 4706 factory.go:153] Registering CRI-O factory
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.972882 4706 factory.go:221] Registration of the crio container factory successfully
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.972999 4706 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.973038 4706 factory.go:103] Registering Raw factory
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.973112 4706 manager.go:1196] Started watching for new ooms in manager
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.974404 4706 manager.go:319] Starting recovery of all containers
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.984741 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.984910 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.984943 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
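[Annotation] The factory.go:219 entry above shows why containerd registration fails on this CRI-O node: there is simply no socket at /run/containerd/containerd.sock. A one-line probe that reproduces the same error (standard library only; an illustration, not cAdvisor's factory code):

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	conn, err := net.DialTimeout("unix", "/run/containerd/containerd.sock", time.Second)
	if err != nil {
		fmt.Println("containerd not available:", err) // "no such file or directory" on this node
		return
	}
	conn.Close()
	fmt.Println("containerd socket is present")
}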
into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.984995 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985019 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985089 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985118 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985149 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985173 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985195 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985227 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985256 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985285 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985311 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985340 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985369 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985392 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985418 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985448 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985538 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985567 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985594 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985620 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985645 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985677 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" 
volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985711 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985740 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985767 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985795 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985822 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985899 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985930 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985955 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.985983 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986010 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986036 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" 
volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986147 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986175 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986204 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986235 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986260 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986285 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986310 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986338 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986364 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986389 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986422 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" 
volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986447 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986471 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986501 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986531 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986608 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986641 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986673 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986702 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986728 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986755 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986780 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" 
volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986806 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986834 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986863 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986888 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986916 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986940 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986968 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.986993 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987022 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987088 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987119 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" 
volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987149 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987178 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987207 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987231 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987257 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987283 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987308 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987333 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987358 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987390 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987418 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" 
volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987443 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987472 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987503 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987533 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987581 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987607 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987636 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987664 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987691 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987717 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987743 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" 
volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987822 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987852 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987879 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987911 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987937 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.987975 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.988007 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.988036 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.988097 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.988143 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.988178 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" 
volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.988206 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.988247 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.988279 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.988310 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.988340 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.989558 4706 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.989623 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.989662 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.989696 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.989724 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.989756 4706 
reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.989830 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.989859 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.989891 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.989917 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.989943 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.989968 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.989996 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990031 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990103 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990132 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990160 4706 reconstruct.go:130] "Volume is marked as uncertain and added 
into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990186 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990213 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990240 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990262 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990282 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990305 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990326 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990349 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990373 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990394 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990417 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" 
volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990440 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990459 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990479 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990500 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990521 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990539 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.990649 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.991592 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.991667 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.991696 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.991741 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.991767 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.991806 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.991839 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.991868 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.991905 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.991935 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.992392 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.992427 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.992460 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.992510 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993029 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" 
volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993097 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993120 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993141 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993177 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993200 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993234 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993261 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993284 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993315 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993339 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993363 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" 
volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993401 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993440 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993484 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993516 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993549 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993588 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993618 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993657 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993686 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993709 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993747 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993779 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993842 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993864 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993885 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993910 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993933 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993962 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.993984 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994005 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994040 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994092 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994113 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994144 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994169 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994198 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994219 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994243 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994277 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994298 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994330 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994355 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994376 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994406 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994428 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994458 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994482 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994505 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994536 4706 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994590 4706 reconstruct.go:97] "Volume reconstruction finished" Dec 06 05:19:37 crc kubenswrapper[4706]: I1206 05:19:37.994606 4706 reconciler.go:26] "Reconciler: start to sync state" Dec 06 05:19:38 crc kubenswrapper[4706]: I1206 05:19:38.013809 4706 manager.go:324] Recovery completed Dec 06 05:19:38 crc kubenswrapper[4706]: I1206 05:19:38.029705 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 06 05:19:38 crc kubenswrapper[4706]: I1206 05:19:38.031910 4706 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Dec 06 05:19:38 crc kubenswrapper[4706]: I1206 05:19:38.032437 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:19:38 crc kubenswrapper[4706]: I1206 05:19:38.032484 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:19:38 crc kubenswrapper[4706]: I1206 05:19:38.032498 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:19:38 crc kubenswrapper[4706]: I1206 05:19:38.034770 4706 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Dec 06 05:19:38 crc kubenswrapper[4706]: I1206 05:19:38.034816 4706 status_manager.go:217] "Starting to sync pod status with apiserver" Dec 06 05:19:38 crc kubenswrapper[4706]: I1206 05:19:38.034826 4706 cpu_manager.go:225] "Starting CPU manager" policy="none" Dec 06 05:19:38 crc kubenswrapper[4706]: I1206 05:19:38.034855 4706 kubelet.go:2335] "Starting kubelet main sync loop" Dec 06 05:19:38 crc kubenswrapper[4706]: I1206 05:19:38.034856 4706 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Dec 06 05:19:38 crc kubenswrapper[4706]: I1206 05:19:38.034896 4706 state_mem.go:36] "Initialized new in-memory state store" Dec 06 05:19:38 crc kubenswrapper[4706]: E1206 05:19:38.034912 4706 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Dec 06 05:19:38 crc kubenswrapper[4706]: W1206 05:19:38.035931 4706 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Dec 06 05:19:38 crc kubenswrapper[4706]: E1206 05:19:38.035997 4706 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Dec 06 05:19:38 crc kubenswrapper[4706]: E1206 05:19:38.066697 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:38 crc kubenswrapper[4706]: E1206 05:19:38.136008 4706 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Dec 06 05:19:38 crc kubenswrapper[4706]: E1206 05:19:38.166838 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:38 crc kubenswrapper[4706]: E1206 05:19:38.167906 4706 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="400ms" Dec 06 05:19:38 crc kubenswrapper[4706]: E1206 05:19:38.266988 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:38 crc kubenswrapper[4706]: E1206 05:19:38.336854 4706 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Dec 06 05:19:38 crc kubenswrapper[4706]: E1206 05:19:38.367415 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:38 crc kubenswrapper[4706]: E1206 05:19:38.468121 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:38 crc kubenswrapper[4706]: E1206 05:19:38.569086 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:38 crc kubenswrapper[4706]: E1206 05:19:38.569730 4706 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="800ms" Dec 06 05:19:38 crc kubenswrapper[4706]: E1206 05:19:38.670086 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:38 crc kubenswrapper[4706]: E1206 05:19:38.737028 4706 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Dec 06 05:19:38 crc kubenswrapper[4706]: E1206 05:19:38.770610 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:38 crc kubenswrapper[4706]: E1206 05:19:38.871172 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:38 crc kubenswrapper[4706]: I1206 05:19:38.965798 4706 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 00:30:08.762380215 +0000 UTC Dec 06 05:19:38 crc kubenswrapper[4706]: I1206 05:19:38.965883 4706 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 403h10m29.796501462s for next certificate rotation Dec 06 05:19:38 crc kubenswrapper[4706]: I1206 05:19:38.966185 4706 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Dec 06 05:19:38 crc kubenswrapper[4706]: E1206 05:19:38.972306 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:39 crc kubenswrapper[4706]: E1206 05:19:39.072802 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:39 crc kubenswrapper[4706]: E1206 05:19:39.173176 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:39 crc kubenswrapper[4706]: E1206 05:19:39.274137 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:39 crc kubenswrapper[4706]: W1206 05:19:39.313397 4706 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Dec 06 05:19:39 crc kubenswrapper[4706]: E1206 05:19:39.313584 4706 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Dec 06 05:19:39 crc kubenswrapper[4706]: E1206 05:19:39.371287 4706 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="1.6s" Dec 06 05:19:39 crc kubenswrapper[4706]: E1206 05:19:39.375235 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 
05:19:39 crc kubenswrapper[4706]: E1206 05:19:39.379003 4706 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.23:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187e88ac633e4df7 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-06 05:19:37.956593143 +0000 UTC m=+0.284417117,LastTimestamp:2025-12-06 05:19:37.956593143 +0000 UTC m=+0.284417117,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 06 05:19:39 crc kubenswrapper[4706]: W1206 05:19:39.401961 4706 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Dec 06 05:19:39 crc kubenswrapper[4706]: E1206 05:19:39.402117 4706 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Dec 06 05:19:39 crc kubenswrapper[4706]: W1206 05:19:39.438992 4706 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Dec 06 05:19:39 crc kubenswrapper[4706]: E1206 05:19:39.439124 4706 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Dec 06 05:19:39 crc kubenswrapper[4706]: E1206 05:19:39.476396 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:39 crc kubenswrapper[4706]: E1206 05:19:39.538125 4706 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Dec 06 05:19:39 crc kubenswrapper[4706]: E1206 05:19:39.576838 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:39 crc kubenswrapper[4706]: W1206 05:19:39.613946 4706 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Dec 06 05:19:39 crc kubenswrapper[4706]: E1206 05:19:39.614099 4706 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Dec 06 
05:19:39 crc kubenswrapper[4706]: E1206 05:19:39.677576 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:39 crc kubenswrapper[4706]: E1206 05:19:39.777959 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:39 crc kubenswrapper[4706]: E1206 05:19:39.879039 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:39 crc kubenswrapper[4706]: I1206 05:19:39.966717 4706 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Dec 06 05:19:39 crc kubenswrapper[4706]: E1206 05:19:39.979889 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:40 crc kubenswrapper[4706]: E1206 05:19:40.080312 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:40 crc kubenswrapper[4706]: E1206 05:19:40.180766 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:40 crc kubenswrapper[4706]: E1206 05:19:40.281235 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:40 crc kubenswrapper[4706]: E1206 05:19:40.381827 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:40 crc kubenswrapper[4706]: E1206 05:19:40.482895 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:40 crc kubenswrapper[4706]: E1206 05:19:40.583407 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:40 crc kubenswrapper[4706]: E1206 05:19:40.683908 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:40 crc kubenswrapper[4706]: E1206 05:19:40.784175 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:40 crc kubenswrapper[4706]: E1206 05:19:40.885105 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:40 crc kubenswrapper[4706]: I1206 05:19:40.967145 4706 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Dec 06 05:19:40 crc kubenswrapper[4706]: E1206 05:19:40.972673 4706 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="3.2s" Dec 06 05:19:40 crc kubenswrapper[4706]: E1206 05:19:40.985735 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:41 crc kubenswrapper[4706]: E1206 05:19:41.086272 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:41 crc kubenswrapper[4706]: E1206 05:19:41.138592 4706 
kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Dec 06 05:19:41 crc kubenswrapper[4706]: E1206 05:19:41.187385 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:41 crc kubenswrapper[4706]: E1206 05:19:41.288308 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:41 crc kubenswrapper[4706]: W1206 05:19:41.350257 4706 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Dec 06 05:19:41 crc kubenswrapper[4706]: E1206 05:19:41.350378 4706 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Dec 06 05:19:41 crc kubenswrapper[4706]: E1206 05:19:41.388716 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:41 crc kubenswrapper[4706]: E1206 05:19:41.489372 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:41 crc kubenswrapper[4706]: I1206 05:19:41.522103 4706 policy_none.go:49] "None policy: Start" Dec 06 05:19:41 crc kubenswrapper[4706]: I1206 05:19:41.523474 4706 memory_manager.go:170] "Starting memorymanager" policy="None" Dec 06 05:19:41 crc kubenswrapper[4706]: I1206 05:19:41.523544 4706 state_mem.go:35] "Initializing new in-memory state store" Dec 06 05:19:41 crc kubenswrapper[4706]: E1206 05:19:41.590532 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:41 crc kubenswrapper[4706]: E1206 05:19:41.691211 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:41 crc kubenswrapper[4706]: E1206 05:19:41.791890 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:41 crc kubenswrapper[4706]: E1206 05:19:41.892592 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:41 crc kubenswrapper[4706]: W1206 05:19:41.907647 4706 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Dec 06 05:19:41 crc kubenswrapper[4706]: E1206 05:19:41.907726 4706 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Dec 06 05:19:41 crc kubenswrapper[4706]: I1206 05:19:41.965954 4706 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial 
tcp 38.102.83.23:6443: connect: connection refused Dec 06 05:19:41 crc kubenswrapper[4706]: E1206 05:19:41.993283 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:42 crc kubenswrapper[4706]: E1206 05:19:42.093832 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:42 crc kubenswrapper[4706]: E1206 05:19:42.194608 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:42 crc kubenswrapper[4706]: E1206 05:19:42.295106 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:42 crc kubenswrapper[4706]: E1206 05:19:42.396304 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:42 crc kubenswrapper[4706]: E1206 05:19:42.496940 4706 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Dec 06 05:19:42 crc kubenswrapper[4706]: I1206 05:19:42.554714 4706 manager.go:334] "Starting Device Plugin manager" Dec 06 05:19:42 crc kubenswrapper[4706]: I1206 05:19:42.554788 4706 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Dec 06 05:19:42 crc kubenswrapper[4706]: I1206 05:19:42.554807 4706 server.go:79] "Starting device plugin registration server" Dec 06 05:19:42 crc kubenswrapper[4706]: I1206 05:19:42.555429 4706 eviction_manager.go:189] "Eviction manager: starting control loop" Dec 06 05:19:42 crc kubenswrapper[4706]: I1206 05:19:42.555458 4706 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Dec 06 05:19:42 crc kubenswrapper[4706]: I1206 05:19:42.555865 4706 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Dec 06 05:19:42 crc kubenswrapper[4706]: I1206 05:19:42.556000 4706 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Dec 06 05:19:42 crc kubenswrapper[4706]: I1206 05:19:42.556016 4706 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Dec 06 05:19:42 crc kubenswrapper[4706]: E1206 05:19:42.567340 4706 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Dec 06 05:19:42 crc kubenswrapper[4706]: W1206 05:19:42.576124 4706 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Dec 06 05:19:42 crc kubenswrapper[4706]: E1206 05:19:42.576206 4706 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Dec 06 05:19:42 crc kubenswrapper[4706]: I1206 05:19:42.656133 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 06 05:19:42 crc kubenswrapper[4706]: I1206 05:19:42.658069 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:19:42 crc kubenswrapper[4706]: I1206 
05:19:42.658123 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:19:42 crc kubenswrapper[4706]: I1206 05:19:42.658136 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:19:42 crc kubenswrapper[4706]: I1206 05:19:42.658169 4706 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 06 05:19:42 crc kubenswrapper[4706]: E1206 05:19:42.658845 4706 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.23:6443: connect: connection refused" node="crc" Dec 06 05:19:42 crc kubenswrapper[4706]: W1206 05:19:42.733183 4706 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Dec 06 05:19:42 crc kubenswrapper[4706]: E1206 05:19:42.733309 4706 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError" Dec 06 05:19:42 crc kubenswrapper[4706]: I1206 05:19:42.859107 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 06 05:19:42 crc kubenswrapper[4706]: I1206 05:19:42.861373 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:19:42 crc kubenswrapper[4706]: I1206 05:19:42.861463 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:19:42 crc kubenswrapper[4706]: I1206 05:19:42.861485 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:19:42 crc kubenswrapper[4706]: I1206 05:19:42.861519 4706 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 06 05:19:42 crc kubenswrapper[4706]: E1206 05:19:42.862289 4706 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.23:6443: connect: connection refused" node="crc" Dec 06 05:19:42 crc kubenswrapper[4706]: I1206 05:19:42.967621 4706 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused Dec 06 05:19:43 crc kubenswrapper[4706]: I1206 05:19:43.263146 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 06 05:19:43 crc kubenswrapper[4706]: I1206 05:19:43.267424 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:19:43 crc kubenswrapper[4706]: I1206 05:19:43.267530 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:19:43 crc kubenswrapper[4706]: I1206 05:19:43.267553 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:19:43 crc kubenswrapper[4706]: I1206 05:19:43.267594 
4706 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Dec 06 05:19:43 crc kubenswrapper[4706]: E1206 05:19:43.269191 4706 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.23:6443: connect: connection refused" node="crc"
Dec 06 05:19:43 crc kubenswrapper[4706]: I1206 05:19:43.967216 4706 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.069787 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.071272 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.071324 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.071333 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.071360 4706 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: E1206 05:19:44.072083 4706 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.23:6443: connect: connection refused" node="crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: E1206 05:19:44.173879 4706 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="6.4s"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.339402 4706 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.339540 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.341104 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.341146 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.341160 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.341286 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.341758 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.341857 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.342247 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.342279 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.342289 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.342369 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.342499 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.342535 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.343253 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.343280 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.343291 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.343440 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.343468 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.343480 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.343571 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.343676 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.343711 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.343828 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.343966 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.344007 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.344512 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.344563 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.344584 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.344626 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.344644 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.344653 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.344783 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.344976 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.345081 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.345897 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.345938 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.345958 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.346206 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.346259 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.346709 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.346742 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.346755 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.347185 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.347225 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.347243 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.399276 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.399366 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.399421 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.399472 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.399525 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.399569 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.399614 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.399658 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.399702 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.399767 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.399830 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.399901 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.399957 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.400003 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.400103 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502192 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502266 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502298 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502318 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502341 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502362 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502383 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502407 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502428 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502386 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502357 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502462 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502475 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502481 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502498 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502508 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502605 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502659 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502516 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502527 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502740 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502530 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502733 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502820 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502880 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502883 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502885 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502923 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.502957 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.503015 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.685399 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.689536 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.712461 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.730739 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.736038 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 06 05:19:44 crc kubenswrapper[4706]: I1206 05:19:44.967198 4706 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused
Dec 06 05:19:45 crc kubenswrapper[4706]: W1206 05:19:45.547799 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-379b6be822a140a2c5d81d083c6f609e35d877c1013bfbdd6859294ecab0bdf6 WatchSource:0}: Error finding container 379b6be822a140a2c5d81d083c6f609e35d877c1013bfbdd6859294ecab0bdf6: Status 404 returned error can't find the container with id 379b6be822a140a2c5d81d083c6f609e35d877c1013bfbdd6859294ecab0bdf6
Dec 06 05:19:45 crc kubenswrapper[4706]: I1206 05:19:45.672893 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:45 crc kubenswrapper[4706]: I1206 05:19:45.675626 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:45 crc kubenswrapper[4706]: I1206 05:19:45.675683 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:45 crc kubenswrapper[4706]: I1206 05:19:45.675696 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:45 crc kubenswrapper[4706]: I1206 05:19:45.675727 4706 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Dec 06 05:19:45 crc kubenswrapper[4706]: E1206 05:19:45.676520 4706 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.23:6443: connect: connection refused" node="crc"
Dec 06 05:19:45 crc kubenswrapper[4706]: W1206 05:19:45.927112 4706 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused
Dec 06 05:19:45 crc kubenswrapper[4706]: E1206 05:19:45.927221 4706 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError"
Dec 06 05:19:45 crc kubenswrapper[4706]: I1206 05:19:45.967422 4706 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.060991 4706 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="6ef91786e78714e5bc5fa9ecdc7ab24a543024c4a326115423d14225241f3fc6" exitCode=0
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.061244 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"6ef91786e78714e5bc5fa9ecdc7ab24a543024c4a326115423d14225241f3fc6"}
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.061427 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"691ef0c464bdd755dbf8b89dcb3412013be66d8c8a501f6a81502bc4768a39ef"}
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.061604 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.062902 4706 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a" exitCode=0
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.062966 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a"}
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.063004 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"eb9b14340b339fe11166c7dd6fb70f96c62ab4ce25f87b6cad7b643bb76d2944"}
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.063128 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.063347 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.063385 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.063404 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.064680 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.064711 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.064727 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.065461 4706 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44" exitCode=0
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.065518 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44"}
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.065539 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"379b6be822a140a2c5d81d083c6f609e35d877c1013bfbdd6859294ecab0bdf6"}
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.065634 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.066266 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.066286 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.066296 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.068205 4706 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="6a68ddef9ca426d06117a8b2a90019998d113c808c085cd561d28123d3b1d440" exitCode=0
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.068277 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"6a68ddef9ca426d06117a8b2a90019998d113c808c085cd561d28123d3b1d440"}
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.068363 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"03fdcbf7690ec0135a76c56a0c11f47f4dbc99675e559cd393274190dcfc9fc1"}
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.068553 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.068749 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.069719 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.069759 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.069771 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.069954 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e"}
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.069979 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"405e8777027a474a99d02de3574ce59952202f697deea28a30f57c94972d20f0"}
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.071369 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.071408 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.071422 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:46 crc kubenswrapper[4706]: W1206 05:19:46.770874 4706 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused
Dec 06 05:19:46 crc kubenswrapper[4706]: E1206 05:19:46.770976 4706 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError"
Dec 06 05:19:46 crc kubenswrapper[4706]: I1206 05:19:46.966519 4706 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused
Dec 06 05:19:47 crc kubenswrapper[4706]: I1206 05:19:47.074367 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f"}
Dec 06 05:19:47 crc kubenswrapper[4706]: I1206 05:19:47.074430 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f"}
Dec 06 05:19:47 crc kubenswrapper[4706]: I1206 05:19:47.078064 4706 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="6fd6afc9b9e0f1d8381f44207f56f612b868d7b9318208f1ecef860c45788342" exitCode=0
Dec 06 05:19:47 crc kubenswrapper[4706]: I1206 05:19:47.078107 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"6fd6afc9b9e0f1d8381f44207f56f612b868d7b9318208f1ecef860c45788342"}
Dec 06 05:19:47 crc kubenswrapper[4706]: I1206 05:19:47.078353 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:47 crc kubenswrapper[4706]: I1206 05:19:47.079395 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:47 crc kubenswrapper[4706]: I1206 05:19:47.079437 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:47 crc kubenswrapper[4706]: I1206 05:19:47.079445 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:47 crc kubenswrapper[4706]: I1206 05:19:47.080254 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f"}
Dec 06 05:19:47 crc kubenswrapper[4706]: I1206 05:19:47.080291 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3"}
Dec 06 05:19:47 crc kubenswrapper[4706]: I1206 05:19:47.100991 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"2ddfcf1ee5cd3d1223607128c54cdf5c250d9467340e5d21f9d2f169c96477d6"}
Dec 06 05:19:47 crc kubenswrapper[4706]: I1206 05:19:47.101171 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:47 crc kubenswrapper[4706]: I1206 05:19:47.102369 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:47 crc kubenswrapper[4706]: I1206 05:19:47.102400 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:47 crc kubenswrapper[4706]: I1206 05:19:47.102409 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:47 crc kubenswrapper[4706]: I1206 05:19:47.104894 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3"}
Dec 06 05:19:47 crc kubenswrapper[4706]: I1206 05:19:47.104964 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af"}
Dec 06 05:19:47 crc kubenswrapper[4706]: I1206 05:19:47.967285 4706 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.111665 4706 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="3ff3a11d42675f8f6e7b106efd0cc563481d4d9861c0c1060a6e35fd13e439d9" exitCode=0
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.111732 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"3ff3a11d42675f8f6e7b106efd0cc563481d4d9861c0c1060a6e35fd13e439d9"}
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.111936 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.113170 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.113232 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.113276 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.119337 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6"}
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.119398 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.121157 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.121222 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.121243 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.126810 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d"}
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.126896 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.128419 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.128482 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.128506 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.132202 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6"}
Dec 06 05:19:48 crc kubenswrapper[4706]: W1206 05:19:48.196352 4706 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused
Dec 06 05:19:48 crc kubenswrapper[4706]: E1206 05:19:48.196463 4706 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError"
Dec 06 05:19:48 crc kubenswrapper[4706]: W1206 05:19:48.832939 4706 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused
Dec 06 05:19:48 crc kubenswrapper[4706]: E1206 05:19:48.833079 4706 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.23:6443: connect: connection refused" logger="UnhandledError"
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.876909 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.878524 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.878571 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.878587 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.878623 4706 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Dec 06 05:19:48 crc kubenswrapper[4706]: E1206 05:19:48.879281 4706 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.23:6443: connect: connection refused" node="crc"
Dec 06 05:19:48 crc kubenswrapper[4706]: I1206 05:19:48.966755 4706 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.23:6443: connect: connection refused
Dec 06 05:19:49 crc kubenswrapper[4706]: I1206 05:19:49.138234 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad"}
Dec 06 05:19:49 crc kubenswrapper[4706]: I1206 05:19:49.143341 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"275f05f6fac1cdcc29b6596af9c5ede5072c3662cf386ce888edb90dfac1241c"}
Dec 06 05:19:49 crc kubenswrapper[4706]: I1206 05:19:49.143385 4706 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Dec 06 05:19:49 crc kubenswrapper[4706]: I1206 05:19:49.143431 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:49 crc kubenswrapper[4706]: I1206 05:19:49.143458 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:49 crc kubenswrapper[4706]: I1206 05:19:49.145577 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:49 crc kubenswrapper[4706]: I1206 05:19:49.145640 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:49 crc kubenswrapper[4706]: I1206 05:19:49.145659 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:49 crc kubenswrapper[4706]: I1206 05:19:49.146268 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:49 crc kubenswrapper[4706]: I1206 05:19:49.146303 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:49 crc kubenswrapper[4706]: I1206 05:19:49.146318 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:50 crc kubenswrapper[4706]: I1206 05:19:50.151110 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f10c0032009486ff4d0e728b718ffe298807cee09d3cb8d39cee8795bc927a6a"}
Dec 06 05:19:50 crc kubenswrapper[4706]: I1206 05:19:50.151189 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"d15849519cedb4b18f33de96a1bfa7f615f304df9215dc973029423c19689eaa"}
Dec 06 05:19:50 crc kubenswrapper[4706]: I1206 05:19:50.151214 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"441852619f2ec7f88f40ffeaae94403e82554da0fd0bce732f61ec414a5243f5"}
Dec 06 05:19:50 crc kubenswrapper[4706]: I1206 05:19:50.156796 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3"}
Dec 06 05:19:50 crc kubenswrapper[4706]: I1206 05:19:50.156948 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:50 crc kubenswrapper[4706]: I1206 05:19:50.158093 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:50 crc kubenswrapper[4706]: I1206 05:19:50.158156 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:50 crc kubenswrapper[4706]: I1206 05:19:50.158177 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:51 crc kubenswrapper[4706]: I1206 05:19:51.164757 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:51 crc kubenswrapper[4706]: I1206 05:19:51.166269 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:51 crc kubenswrapper[4706]: I1206 05:19:51.166731 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"865e45db3642879f721e641001b0ca8446a1f03ae8a51e0fe361028752bb6178"}
Dec 06 05:19:51 crc kubenswrapper[4706]: I1206 05:19:51.166911 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 06 05:19:51 crc kubenswrapper[4706]: I1206 05:19:51.167601 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:51 crc kubenswrapper[4706]: I1206 05:19:51.167804 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:51 crc kubenswrapper[4706]: I1206 05:19:51.167961 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:51 crc kubenswrapper[4706]: I1206 05:19:51.167706 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:51 crc kubenswrapper[4706]: I1206 05:19:51.168222 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:51 crc kubenswrapper[4706]: I1206 05:19:51.168240 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:52 crc kubenswrapper[4706]: I1206 05:19:52.168703 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:52 crc kubenswrapper[4706]: I1206 05:19:52.168830 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:52 crc kubenswrapper[4706]: I1206 05:19:52.170883 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:52 crc kubenswrapper[4706]: I1206 05:19:52.170924 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:52 crc kubenswrapper[4706]: I1206 05:19:52.170937 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:52 crc kubenswrapper[4706]: I1206 05:19:52.171531 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:52 crc kubenswrapper[4706]: I1206 05:19:52.171588 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:52 crc kubenswrapper[4706]: I1206 05:19:52.171601 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:52 crc kubenswrapper[4706]: E1206 05:19:52.567527 4706 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Dec 06 05:19:52 crc kubenswrapper[4706]: I1206 05:19:52.568398 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Dec 06 05:19:52 crc kubenswrapper[4706]: I1206 05:19:52.568876 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:52 crc kubenswrapper[4706]: I1206 05:19:52.570372 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:52 crc kubenswrapper[4706]: I1206 05:19:52.570418 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:52 crc kubenswrapper[4706]: I1206 05:19:52.570434 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:53 crc kubenswrapper[4706]: I1206 05:19:53.658268 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Dec 06 05:19:53 crc kubenswrapper[4706]: I1206 05:19:53.658573 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:53 crc kubenswrapper[4706]: I1206 05:19:53.660363 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:53 crc kubenswrapper[4706]: I1206 05:19:53.660409 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:53 crc kubenswrapper[4706]: I1206 05:19:53.660421 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:53 crc kubenswrapper[4706]: I1206 05:19:53.927409 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 06 05:19:53 crc kubenswrapper[4706]: I1206 05:19:53.927662 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:53 crc kubenswrapper[4706]: I1206 05:19:53.929707 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:53 crc kubenswrapper[4706]: I1206 05:19:53.929775 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:53 crc kubenswrapper[4706]: I1206 05:19:53.929795 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:54 crc kubenswrapper[4706]: I1206 05:19:54.689915 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Dec 06 05:19:54 crc kubenswrapper[4706]: I1206 05:19:54.690170 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:54 crc kubenswrapper[4706]: I1206 05:19:54.691845 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:54 crc kubenswrapper[4706]: I1206 05:19:54.691879 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:54 crc kubenswrapper[4706]: I1206 05:19:54.691891 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:54 crc kubenswrapper[4706]: I1206 05:19:54.713553 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 06 05:19:54 crc kubenswrapper[4706]: I1206 05:19:54.713609 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Dec 06 05:19:54 crc kubenswrapper[4706]: I1206 05:19:54.713735 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:54 crc kubenswrapper[4706]: I1206 05:19:54.715237 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:54 crc kubenswrapper[4706]: I1206 05:19:54.715294 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:54 crc kubenswrapper[4706]: I1206 05:19:54.715307 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:55 crc kubenswrapper[4706]: I1206 05:19:55.279907 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:55 crc kubenswrapper[4706]: I1206 05:19:55.281821 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:55 crc kubenswrapper[4706]: I1206 05:19:55.281873 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:55 crc kubenswrapper[4706]: I1206 05:19:55.281888 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:55 crc kubenswrapper[4706]: I1206 05:19:55.281928 4706 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Dec 06 05:19:56 crc kubenswrapper[4706]: I1206 05:19:56.259136 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 06 05:19:56 crc kubenswrapper[4706]: I1206 05:19:56.259376 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:56 crc kubenswrapper[4706]: I1206 05:19:56.260819 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:56 crc kubenswrapper[4706]: I1206 05:19:56.260887 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:56 crc kubenswrapper[4706]: I1206 05:19:56.260903 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:56 crc kubenswrapper[4706]: I1206 05:19:56.892207 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 06 05:19:56 crc kubenswrapper[4706]: I1206 05:19:56.900309 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 06 05:19:57 crc kubenswrapper[4706]: I1206 05:19:57.183656 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:57 crc kubenswrapper[4706]: I1206 05:19:57.183905 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 06 05:19:57 crc kubenswrapper[4706]: I1206 05:19:57.184954 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:57 crc kubenswrapper[4706]: I1206 05:19:57.185096 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:57 crc kubenswrapper[4706]: I1206 05:19:57.185186 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:57 crc kubenswrapper[4706]: I1206 05:19:57.189277 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 06 05:19:58 crc kubenswrapper[4706]: I1206 05:19:58.186787 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:58 crc kubenswrapper[4706]: I1206 05:19:58.187923 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:58 crc kubenswrapper[4706]: I1206 05:19:58.188034 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:58 crc kubenswrapper[4706]: I1206 05:19:58.188136 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:59 crc kubenswrapper[4706]: I1206 05:19:59.189589 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:19:59 crc kubenswrapper[4706]: I1206 05:19:59.191592 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:19:59 crc kubenswrapper[4706]: I1206 05:19:59.191646 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:19:59 crc kubenswrapper[4706]: I1206 05:19:59.191665 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:19:59 crc kubenswrapper[4706]: I1206 05:19:59.259506 4706 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Dec 06 05:19:59 crc kubenswrapper[4706]: I1206 05:19:59.259684 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Dec 06 05:19:59 crc kubenswrapper[4706]: E1206 05:19:59.381419 4706 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": net/http: TLS handshake timeout" event="&Event{ObjectMeta:{crc.187e88ac633e4df7 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-06 05:19:37.956593143 +0000 UTC m=+0.284417117,LastTimestamp:2025-12-06 05:19:37.956593143 +0000 UTC m=+0.284417117,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Dec 06 05:19:59 crc kubenswrapper[4706]: I1206 05:19:59.874532 4706 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Dec 06 05:19:59 crc kubenswrapper[4706]: I1206 05:19:59.874611 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Dec 06 05:19:59 crc kubenswrapper[4706]: I1206 05:19:59.892347 4706 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Dec 06 05:19:59 crc kubenswrapper[4706]: I1206 05:19:59.892438 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Dec 06 05:20:02 crc kubenswrapper[4706]: I1206 05:20:02.221538 4706 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Dec 06 05:20:02 crc kubenswrapper[4706]: I1206 05:20:02.221630 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Dec 06 05:20:02 crc kubenswrapper[4706]: E1206 05:20:02.567692 4706 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Dec 06 05:20:03 crc kubenswrapper[4706]: I1206 05:20:03.683191 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Dec 06 05:20:03 crc kubenswrapper[4706]: I1206 05:20:03.684106 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:20:03 crc kubenswrapper[4706]: I1206 05:20:03.685695 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:03 crc kubenswrapper[4706]: I1206 05:20:03.685742 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:03 crc kubenswrapper[4706]: I1206 05:20:03.685755 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:03 crc kubenswrapper[4706]: I1206 05:20:03.699722 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Dec 06 05:20:04 crc kubenswrapper[4706]: I1206 05:20:04.207466 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Dec 06 05:20:04 crc kubenswrapper[4706]: I1206 05:20:04.208734 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:04 crc kubenswrapper[4706]: I1206 05:20:04.209004 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:04 crc kubenswrapper[4706]: I1206 05:20:04.209222 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasSufficientPID" Dec 06 05:20:04 crc kubenswrapper[4706]: I1206 05:20:04.720216 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:20:04 crc kubenswrapper[4706]: I1206 05:20:04.720520 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 06 05:20:04 crc kubenswrapper[4706]: I1206 05:20:04.721956 4706 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Dec 06 05:20:04 crc kubenswrapper[4706]: I1206 05:20:04.722067 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Dec 06 05:20:04 crc kubenswrapper[4706]: I1206 05:20:04.722178 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:04 crc kubenswrapper[4706]: I1206 05:20:04.722246 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:04 crc kubenswrapper[4706]: I1206 05:20:04.722265 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:04 crc kubenswrapper[4706]: I1206 05:20:04.725560 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:20:04 crc kubenswrapper[4706]: E1206 05:20:04.872359 4706 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="7s" Dec 06 05:20:04 crc kubenswrapper[4706]: I1206 05:20:04.876041 4706 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Dec 06 05:20:04 crc kubenswrapper[4706]: I1206 05:20:04.876706 4706 trace.go:236] Trace[643993052]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (06-Dec-2025 05:19:54.757) (total time: 10118ms): Dec 06 05:20:04 crc kubenswrapper[4706]: Trace[643993052]: ---"Objects listed" error: 10118ms (05:20:04.876) Dec 06 05:20:04 crc kubenswrapper[4706]: Trace[643993052]: [10.11870288s] [10.11870288s] END Dec 06 05:20:04 crc kubenswrapper[4706]: I1206 05:20:04.876751 4706 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Dec 06 05:20:04 crc kubenswrapper[4706]: I1206 05:20:04.880266 4706 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Dec 06 05:20:04 crc kubenswrapper[4706]: I1206 05:20:04.881150 4706 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Dec 06 05:20:04 crc kubenswrapper[4706]: E1206 05:20:04.882690 4706 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Dec 06 05:20:05 crc kubenswrapper[4706]: I1206 05:20:05.210757 4706 kubelet_node_status.go:401] 
"Setting node annotation to enable volume controller attach/detach" Dec 06 05:20:05 crc kubenswrapper[4706]: I1206 05:20:05.212079 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:05 crc kubenswrapper[4706]: I1206 05:20:05.212129 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:05 crc kubenswrapper[4706]: I1206 05:20:05.212145 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:05 crc kubenswrapper[4706]: I1206 05:20:05.298198 4706 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:57124->192.168.126.11:17697: read: connection reset by peer" start-of-body= Dec 06 05:20:05 crc kubenswrapper[4706]: I1206 05:20:05.298285 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:57124->192.168.126.11:17697: read: connection reset by peer" Dec 06 05:20:05 crc kubenswrapper[4706]: I1206 05:20:05.948964 4706 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Dec 06 05:20:05 crc kubenswrapper[4706]: I1206 05:20:05.965615 4706 apiserver.go:52] "Watching apiserver" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.056310 4706 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.056833 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c"] Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.057419 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.057524 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.057601 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.057757 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.057812 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.057931 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.058375 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.058438 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.058360 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.062427 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.062445 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.062432 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.062572 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.064036 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.064095 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.064237 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.064356 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.064485 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.066628 4706 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.090253 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.090312 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.090358 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.090397 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.090428 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: 
\"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.090458 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.090844 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.090898 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.090486 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.090997 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.091020 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.091011 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.091093 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.091293 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.091352 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.091383 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.091432 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.091690 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.091755 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.091794 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.091843 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.091870 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.091897 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.091963 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.092134 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.092380 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.092460 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.093412 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.093484 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.093504 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.093775 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.093964 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.094336 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.093522 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.094416 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.094443 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.094753 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.094817 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.094900 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.096145 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.096785 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.096814 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.096835 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.097222 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.097347 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.097393 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.097419 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.097456 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.097476 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.097663 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.097854 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.097952 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.097494 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.098028 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.098074 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.098097 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.098123 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.098152 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.098177 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.098169 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.104148 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.104432 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.104694 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.098206 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.104847 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.104863 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.104949 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.104987 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.104988 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.105078 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.105111 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.105241 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.105285 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.105290 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.105380 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.105382 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.105593 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.105664 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.105702 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.105736 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.105770 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106231 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106313 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106356 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106415 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106463 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106500 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106536 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106564 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106596 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106632 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106683 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106720 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106756 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106789 4706 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106822 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106857 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106893 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106937 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106967 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106997 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107064 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107262 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107309 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107345 4706 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107378 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107403 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107437 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107469 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107502 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107528 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107576 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107610 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107640 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107673 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107705 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107740 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107764 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107796 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107831 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107862 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107895 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.105372 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.105612 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). 
InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107925 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108016 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108065 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108095 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108121 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108150 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108174 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108198 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108227 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108251 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") 
pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108275 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108300 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108354 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108379 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108405 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108447 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108467 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108493 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108518 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108544 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod 
\"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108565 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108587 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108612 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108632 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108654 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108676 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108702 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108725 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108747 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108772 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod 
\"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.110642 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.110685 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.110715 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.110746 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.110781 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.110815 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.113577 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.113631 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.113969 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.114117 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: 
\"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.114174 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.114217 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.114250 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.114542 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.114840 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.115120 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.115156 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.115221 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.115343 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.115510 4706 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.115664 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.115696 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.116559 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.116629 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.116676 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.116720 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.116756 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.116791 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.116828 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107936 4706 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.105640 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.105697 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.105833 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.105902 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106148 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106221 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106419 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106409 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106619 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.106720 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107242 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107295 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107572 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107636 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107678 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.107692 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108031 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108277 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108291 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108508 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108666 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.108881 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.109148 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.109543 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.109607 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.109619 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.109910 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.110005 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.110187 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). 
InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.110435 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.110452 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.110845 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.111009 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.111031 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.111131 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.111260 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.111344 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.111486 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.111594 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.111681 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.111706 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.111703 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.112020 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.112746 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.112866 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.112882 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.112928 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.113213 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.113621 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.113674 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.113672 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.113703 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.113749 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.113684 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.114028 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.114061 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.114209 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.114222 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.114669 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.114748 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.114782 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.114983 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.115040 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.115068 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.115074 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.115347 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.115381 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.115455 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.115467 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.115504 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.115526 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.115795 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.115900 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.116014 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.116033 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.116258 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.116506 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.116664 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.116685 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.116830 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.117163 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.117227 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.117229 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.117470 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.117875 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.117950 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.118202 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.117890 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.118505 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.119178 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.119204 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.119541 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.119833 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.117269 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.119893 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.119918 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.119946 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120018 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120260 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120277 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120373 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120483 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120584 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120634 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120650 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120666 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120698 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120730 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120761 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120793 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120823 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120851 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120877 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120912 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120945 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120975 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.121001 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.121031 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.121087 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.121113 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 06 05:20:06 crc 
kubenswrapper[4706]: I1206 05:20:06.121139 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.121164 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.121187 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.121214 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.121432 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.121502 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.121530 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.121555 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.122660 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.122719 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.122755 4706 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.122792 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.122832 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.122869 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.127913 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.127974 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.129395 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.129474 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.129505 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.129531 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod 
\"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.129554 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.129574 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.129604 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.129629 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.129649 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.129742 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.129776 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.129802 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.129830 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " 
pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.129858 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.129878 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.129902 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.129937 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.129960 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.129983 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.130008 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.130031 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.130086 4706 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.130109 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.130228 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.130241 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.130254 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.130264 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.130279 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.130289 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.130299 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.120910 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.122003 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.122569 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.121387 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.124339 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.124443 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.125592 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.125921 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.127016 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.121736 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.127185 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.127395 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.127588 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.127847 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.127925 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.128262 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.128475 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). 
InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.128547 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.128764 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.130412 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.134982 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.130496 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.130525 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:20:06.63050008 +0000 UTC m=+28.958324024 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.131274 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.131373 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.132086 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.132138 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.133220 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.133229 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.133572 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.133813 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.133841 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.134213 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.134405 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.134214 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.135224 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.135296 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.135369 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.135839 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.135858 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.135905 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.136122 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.136152 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.136410 4706 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.136543 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.136580 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.136621 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:06.636586741 +0000 UTC m=+28.964410705 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.136622 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.136687 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.136834 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.136873 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.136938 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.136992 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.137147 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.137379 4706 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.137446 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.137259 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.137470 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.137452 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:06.637438763 +0000 UTC m=+28.965262707 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138092 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138172 4706 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138422 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138477 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138500 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138523 4706 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138643 4706 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138664 4706 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138716 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138738 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 
05:20:06.138752 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138764 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138782 4706 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138797 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138811 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138837 4706 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138884 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138900 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138914 4706 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138927 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138944 4706 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138958 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138970 4706 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.138983 4706 
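[editor's note] The swap_util entry above is the kubelet probing whether tmpfs supports the noswap mount option; because the scratch directory /var/lib/kubelet/plugins/kubernetes.io/empty-dir did not exist yet, it gave up and logged "Assuming not supported". A rough sketch of that kind of probe (assumed logic for illustration, needs root; this is not the kubelet's actual code):

```go
package main

import (
	"fmt"
	"os"
	"syscall"
)

// tmpfsNoswapSupported tries to mount a tmpfs with the "noswap" option in a
// scratch directory under base. Any failure (including being unable to create
// the test directory, as in the log) is treated as "not supported".
func tmpfsNoswapSupported(base string) bool {
	dir, err := os.MkdirTemp(base, "tmpfs-noswap-test-")
	if err != nil {
		return false // same fallback the log shows: cannot even create the test dir
	}
	defer os.RemoveAll(dir)
	if err := syscall.Mount("tmpfs", dir, "tmpfs", 0, "noswap"); err != nil {
		return false
	}
	_ = syscall.Unmount(dir, 0)
	return true
}

func main() {
	fmt.Println("tmpfs noswap supported:",
		tmpfsNoswapSupported("/var/lib/kubelet/plugins/kubernetes.io/empty-dir"))
}
```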
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139001 4706 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139014 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139027 4706 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139040 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139078 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139092 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139105 4706 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139122 4706 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139483 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139535 4706 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139550 4706 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139563 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139815 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139826 4706 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139837 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139848 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139862 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139872 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139883 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139896 4706 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139906 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139916 4706 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139926 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139938 4706 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139947 4706 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139958 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139968 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139980 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.139991 4706 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140008 4706 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140018 4706 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140029 4706 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140038 4706 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140099 4706 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140111 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140120 4706 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140132 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140140 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140152 4706 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140161 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140171 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140181 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140194 4706 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140203 4706 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140212 4706 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140221 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140233 4706 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140243 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140253 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140264 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140275 4706 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140283 4706 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140293 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140304 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140328 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140340 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140350 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140361 4706 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140370 4706 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140379 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\""
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140392 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\""
\"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140476 4706 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140487 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140497 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140571 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140511 4706 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140622 4706 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140632 4706 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140641 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140652 4706 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140665 4706 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140677 4706 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140690 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140706 4706 
reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140718 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140729 4706 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140746 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140759 4706 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140771 4706 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140784 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140801 4706 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140813 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140826 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140838 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140855 4706 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140877 4706 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.140890 4706 reconciler_common.go:293] "Volume detached for volume 
\"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.141399 4706 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.141417 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.141427 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.141436 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.141449 4706 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.141458 4706 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.141468 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.141476 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.141487 4706 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.141498 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.141507 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.141519 4706 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.141530 4706 reconciler_common.go:293] "Volume detached for 
volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.141537 4706 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.141546 4706 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.141559 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.141568 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.147221 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.148539 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.148892 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.152084 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.152378 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.154513 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.158591 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
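[editor's note] Every status_manager failure in this section has the same root cause: the kubelet's status PATCH must pass through the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743, and nothing is listening there yet because the webhook pod is itself still being recreated. A trivial sketch that reproduces the exact dial error from the log:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// Probe the webhook endpoint the API server tries to call for pod patches.
	conn, err := net.DialTimeout("tcp", "127.0.0.1:9743", 2*time.Second)
	if err != nil {
		// Matches the log: "dial tcp 127.0.0.1:9743: connect: connection refused"
		fmt.Println("webhook endpoint unreachable:", err)
		return
	}
	conn.Close()
	fmt.Println("webhook endpoint is listening")
}
```

Once the network-node-identity pod's webhook container comes up, these patches succeed and the status updates drain; the failures here are transient, not a persistent misconfiguration.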
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.159039 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.165875 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.166257 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.168915 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.168940 4706 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.166325 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.168993 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.169018 4706 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.169020 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:06.668997342 +0000 UTC m=+28.996821276 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.166384 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.169113 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:06.669089394 +0000 UTC m=+28.996913348 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.169710 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.170590 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.171648 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.174692 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
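[editor's note] The projected.go errors above come from assembling a kube-api-access-* volume whose ConfigMap sources (kube-root-ca.crt and openshift-service-ca.crt) are not yet in the kubelet's object cache after the restart. For orientation, a sketch of what such a projected volume contains, using the upstream default shape plus the OpenShift service-CA source; the expiry and item keys are illustrative assumptions, not read from this log:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// kubeAPIAccessVolume builds the assumed shape of a generated kube-api-access-*
// volume: a bound service-account token plus the two ConfigMaps the log reports
// as "not registered". Until both ConfigMaps are cached, SetUp for the whole
// projected volume fails, which is exactly the error chain above.
func kubeAPIAccessVolume(name string) corev1.Volume {
	expiry := int64(3607) // illustrative token lifetime
	return corev1.Volume{
		Name: name, // e.g. "kube-api-access-cqllr" from the log
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{
					{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
						ExpirationSeconds: &expiry,
						Path:              "token",
					}},
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
						Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
					}},
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "openshift-service-ca.crt"},
						Items:                []corev1.KeyToPath{{Key: "service-ca.crt", Path: "service-ca.crt"}},
					}},
				},
			},
		},
	}
}

func main() {
	v := kubeAPIAccessVolume("kube-api-access-cqllr")
	fmt.Println("projected volume:", v.Name, "sources:", len(v.VolumeSource.Projected.Sources))
}
```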
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.178335 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.181986 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.186528 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.203485 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.219293 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.220455 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.224237 4706 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3" exitCode=255 Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.224329 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3"} Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.237877 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.242236 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.242350 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.242422 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.242635 4706 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243236 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" 
(UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243277 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243399 4706 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243426 4706 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243440 4706 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243452 4706 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243463 4706 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243475 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243486 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243498 4706 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243519 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243534 4706 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243550 4706 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243562 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243608 4706 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243635 4706 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243645 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243657 4706 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243669 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243680 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243691 4706 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243703 4706 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243715 4706 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243727 4706 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243737 4706 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243747 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: 
\"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243756 4706 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243767 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243777 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243787 4706 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243797 4706 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243807 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243817 4706 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243827 4706 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243836 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243846 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243857 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243866 4706 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243878 4706 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243887 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243896 4706 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243906 4706 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243915 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243924 4706 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243933 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243946 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243956 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243965 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243975 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243986 4706 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.243996 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.244006 4706 reconciler_common.go:293] "Volume 
detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.244016 4706 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.244030 4706 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.244039 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.244074 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.244085 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.244096 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.244106 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.244116 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.244146 4706 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.244156 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.252216 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.263802 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.274555 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.279340 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.280710 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.290996 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.297795 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"]
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.297864 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.297805 4706 scope.go:117] "RemoveContainer" containerID="3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3"
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.305784 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-zct8k"]
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.306200 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-zct8k"
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.307758 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.307944 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.308371 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.309792 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.327333 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.337777 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.348341 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.359632 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.369624 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.378091 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.379567 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resource
s\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.381650 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.389362 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.390318 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.417479 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.429718 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.445923 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"hosts-file\" (UniqueName: \"kubernetes.io/host-path/08344ca0-306d-4ff1-81eb-cb9d32a4230a-hosts-file\") pod \"node-resolver-zct8k\" (UID: \"08344ca0-306d-4ff1-81eb-cb9d32a4230a\") " pod="openshift-dns/node-resolver-zct8k" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.445965 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xg7vd\" (UniqueName: \"kubernetes.io/projected/08344ca0-306d-4ff1-81eb-cb9d32a4230a-kube-api-access-xg7vd\") pod \"node-resolver-zct8k\" (UID: \"08344ca0-306d-4ff1-81eb-cb9d32a4230a\") " pod="openshift-dns/node-resolver-zct8k" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.546467 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xg7vd\" (UniqueName: \"kubernetes.io/projected/08344ca0-306d-4ff1-81eb-cb9d32a4230a-kube-api-access-xg7vd\") pod \"node-resolver-zct8k\" (UID: \"08344ca0-306d-4ff1-81eb-cb9d32a4230a\") " pod="openshift-dns/node-resolver-zct8k" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.546520 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/08344ca0-306d-4ff1-81eb-cb9d32a4230a-hosts-file\") pod \"node-resolver-zct8k\" (UID: \"08344ca0-306d-4ff1-81eb-cb9d32a4230a\") " pod="openshift-dns/node-resolver-zct8k" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.546591 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/08344ca0-306d-4ff1-81eb-cb9d32a4230a-hosts-file\") pod \"node-resolver-zct8k\" (UID: \"08344ca0-306d-4ff1-81eb-cb9d32a4230a\") " pod="openshift-dns/node-resolver-zct8k" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.587900 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xg7vd\" (UniqueName: \"kubernetes.io/projected/08344ca0-306d-4ff1-81eb-cb9d32a4230a-kube-api-access-xg7vd\") pod \"node-resolver-zct8k\" (UID: \"08344ca0-306d-4ff1-81eb-cb9d32a4230a\") " pod="openshift-dns/node-resolver-zct8k" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.625521 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-zct8k" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.647547 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.647645 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.647698 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.647801 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:20:07.647756354 +0000 UTC m=+29.975580298 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.647828 4706 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.647898 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:07.647890288 +0000 UTC m=+29.975714232 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.647992 4706 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.648135 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:07.648103293 +0000 UTC m=+29.975927437 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 06 05:20:06 crc kubenswrapper[4706]: W1206 05:20:06.654645 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod08344ca0_306d_4ff1_81eb_cb9d32a4230a.slice/crio-bdd985fbb85d6133ffc04312cc08b34a7e25298c3005fdefbf39bc771949e01b WatchSource:0}: Error finding container bdd985fbb85d6133ffc04312cc08b34a7e25298c3005fdefbf39bc771949e01b: Status 404 returned error can't find the container with id bdd985fbb85d6133ffc04312cc08b34a7e25298c3005fdefbf39bc771949e01b Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.715222 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-z27rn"] Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.715710 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.718135 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.719862 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.727731 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.728070 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.728171 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.728338 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-gv2xq"] Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.729495 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-rtxrp"] Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.729813 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.730229 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.733757 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.733979 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.734169 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.734271 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.734405 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.734433 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.734641 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.741009 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.748116 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.748165 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.748300 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.748317 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.748328 4706 projected.go:194] Error preparing data for projected 
volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.748381 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:07.748367308 +0000 UTC m=+30.076191252 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.748400 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.748434 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.748449 4706 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:06 crc kubenswrapper[4706]: E1206 05:20:06.748526 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:07.748506161 +0000 UTC m=+30.076330105 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.753018 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.766653 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06
T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.779224 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.790134 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.799197 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.807393 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.820002 4706 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.835757 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.846401 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849463 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/87424bac-c58b-4fae-8f44-443e202bf113-cnibin\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849506 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-multus-cni-dir\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849531 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-multus-socket-dir-parent\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849557 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/87424bac-c58b-4fae-8f44-443e202bf113-system-cni-dir\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849581 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-multus-conf-dir\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849604 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/ae6d3c62-ad40-492b-9c35-d0043649cb81-rootfs\") pod \"machine-config-daemon-z27rn\" (UID: \"ae6d3c62-ad40-492b-9c35-d0043649cb81\") " pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849626 4706 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ae6d3c62-ad40-492b-9c35-d0043649cb81-proxy-tls\") pod \"machine-config-daemon-z27rn\" (UID: \"ae6d3c62-ad40-492b-9c35-d0043649cb81\") " pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849646 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-host-run-netns\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849669 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-host-run-multus-certs\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849695 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/87424bac-c58b-4fae-8f44-443e202bf113-cni-binary-copy\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849720 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqtks\" (UniqueName: \"kubernetes.io/projected/87424bac-c58b-4fae-8f44-443e202bf113-kube-api-access-lqtks\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849741 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-host-run-k8s-cni-cncf-io\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849781 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/87424bac-c58b-4fae-8f44-443e202bf113-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849804 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-host-var-lib-cni-bin\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849847 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-hostroot\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " 
pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849868 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-multus-daemon-config\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849890 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-etc-kubernetes\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849906 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-cni-binary-copy\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849920 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-cnibin\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849961 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-host-var-lib-cni-multus\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.849992 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/87424bac-c58b-4fae-8f44-443e202bf113-os-release\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.850013 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-host-var-lib-kubelet\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.850035 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnhxc\" (UniqueName: \"kubernetes.io/projected/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-kube-api-access-hnhxc\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.850071 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fwxq8\" (UniqueName: \"kubernetes.io/projected/ae6d3c62-ad40-492b-9c35-d0043649cb81-kube-api-access-fwxq8\") pod \"machine-config-daemon-z27rn\" (UID: \"ae6d3c62-ad40-492b-9c35-d0043649cb81\") 
" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.850089 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-system-cni-dir\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.850116 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/87424bac-c58b-4fae-8f44-443e202bf113-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.850133 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ae6d3c62-ad40-492b-9c35-d0043649cb81-mcd-auth-proxy-config\") pod \"machine-config-daemon-z27rn\" (UID: \"ae6d3c62-ad40-492b-9c35-d0043649cb81\") " pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.850153 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-os-release\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.855826 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.872006 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.883415 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.900838 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: 
connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.910203 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.918622 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.931624 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.943221 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.950783 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-system-cni-dir\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.950834 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/87424bac-c58b-4fae-8f44-443e202bf113-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.950890 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ae6d3c62-ad40-492b-9c35-d0043649cb81-mcd-auth-proxy-config\") pod \"machine-config-daemon-z27rn\" (UID: \"ae6d3c62-ad40-492b-9c35-d0043649cb81\") " 
pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.950912 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-os-release\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.950936 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/87424bac-c58b-4fae-8f44-443e202bf113-cnibin\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.950957 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-multus-cni-dir\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.950977 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-multus-socket-dir-parent\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.950994 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/87424bac-c58b-4fae-8f44-443e202bf113-system-cni-dir\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951012 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-multus-conf-dir\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951032 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/ae6d3c62-ad40-492b-9c35-d0043649cb81-rootfs\") pod \"machine-config-daemon-z27rn\" (UID: \"ae6d3c62-ad40-492b-9c35-d0043649cb81\") " pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951062 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ae6d3c62-ad40-492b-9c35-d0043649cb81-proxy-tls\") pod \"machine-config-daemon-z27rn\" (UID: \"ae6d3c62-ad40-492b-9c35-d0043649cb81\") " pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951081 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-host-run-netns\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 
05:20:06.951099 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-host-run-multus-certs\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951118 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/87424bac-c58b-4fae-8f44-443e202bf113-cni-binary-copy\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951111 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-os-release\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951197 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/87424bac-c58b-4fae-8f44-443e202bf113-cnibin\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951309 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-multus-socket-dir-parent\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951350 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/87424bac-c58b-4fae-8f44-443e202bf113-system-cni-dir\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951374 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-multus-conf-dir\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951397 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/ae6d3c62-ad40-492b-9c35-d0043649cb81-rootfs\") pod \"machine-config-daemon-z27rn\" (UID: \"ae6d3c62-ad40-492b-9c35-d0043649cb81\") " pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951379 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-multus-cni-dir\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951498 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: 
\"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-host-run-multus-certs\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951517 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-system-cni-dir\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951540 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-host-run-netns\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951140 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqtks\" (UniqueName: \"kubernetes.io/projected/87424bac-c58b-4fae-8f44-443e202bf113-kube-api-access-lqtks\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951589 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-host-run-k8s-cni-cncf-io\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951671 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/87424bac-c58b-4fae-8f44-443e202bf113-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951693 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-host-var-lib-cni-bin\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951735 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-hostroot\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951757 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-multus-daemon-config\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951778 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-etc-kubernetes\") pod \"multus-rtxrp\" (UID: 
\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951800 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-cni-binary-copy\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951867 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-cnibin\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951886 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-host-var-lib-cni-multus\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951911 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/87424bac-c58b-4fae-8f44-443e202bf113-os-release\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951936 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-host-var-lib-kubelet\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951956 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnhxc\" (UniqueName: \"kubernetes.io/projected/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-kube-api-access-hnhxc\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951981 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fwxq8\" (UniqueName: \"kubernetes.io/projected/ae6d3c62-ad40-492b-9c35-d0043649cb81-kube-api-access-fwxq8\") pod \"machine-config-daemon-z27rn\" (UID: \"ae6d3c62-ad40-492b-9c35-d0043649cb81\") " pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.951976 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/ae6d3c62-ad40-492b-9c35-d0043649cb81-mcd-auth-proxy-config\") pod \"machine-config-daemon-z27rn\" (UID: \"ae6d3c62-ad40-492b-9c35-d0043649cb81\") " pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.952114 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-host-var-lib-cni-multus\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 
05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.952127 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-cnibin\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.952137 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-host-var-lib-cni-bin\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.952154 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-host-var-lib-kubelet\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.952175 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-hostroot\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.952495 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/87424bac-c58b-4fae-8f44-443e202bf113-cni-binary-copy\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.952618 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/87424bac-c58b-4fae-8f44-443e202bf113-os-release\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.952643 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-etc-kubernetes\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.954063 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-multus-daemon-config\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.954179 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-cni-binary-copy\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.954194 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: 
\"kubernetes.io/host-path/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-host-run-k8s-cni-cncf-io\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.954618 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/ae6d3c62-ad40-492b-9c35-d0043649cb81-proxy-tls\") pod \"machine-config-daemon-z27rn\" (UID: \"ae6d3c62-ad40-492b-9c35-d0043649cb81\") " pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.954890 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/87424bac-c58b-4fae-8f44-443e202bf113-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.954906 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.965950 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqtks\" (UniqueName: \"kubernetes.io/projected/87424bac-c58b-4fae-8f44-443e202bf113-kube-api-access-lqtks\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.967860 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnhxc\" (UniqueName: \"kubernetes.io/projected/f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5-kube-api-access-hnhxc\") pod \"multus-rtxrp\" (UID: \"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\") " pod="openshift-multus/multus-rtxrp" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.968884 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fwxq8\" (UniqueName: \"kubernetes.io/projected/ae6d3c62-ad40-492b-9c35-d0043649cb81-kube-api-access-fwxq8\") pod \"machine-config-daemon-z27rn\" (UID: \"ae6d3c62-ad40-492b-9c35-d0043649cb81\") " pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.973652 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.985919 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:06 crc kubenswrapper[4706]: I1206 05:20:06.998619 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.047911 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.058656 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-rtxrp" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.090392 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-l5xg7"] Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.092147 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.095001 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.095117 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.095487 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.095963 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.096097 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.096313 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.096365 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.110744 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.123540 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.133228 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.146886 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.165869 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceac
count\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.180410 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.191608 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.202733 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.216663 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.229042 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"34fc367e719daa2477a830aecfe50f571e1d9324c3989339c3372343c1d7c000"} Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.229638 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.230018 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-zct8k" event={"ID":"08344ca0-306d-4ff1-81eb-cb9d32a4230a","Type":"ContainerStarted","Data":"bdd985fbb85d6133ffc04312cc08b34a7e25298c3005fdefbf39bc771949e01b"} Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.231223 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"da812e857dd721f233078512cf27a823eb8a7b2461e1f52ab60e1895d9c110d0"} Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.232822 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"5b34ea5b1b5f7d45f7e926a408c7809d85f3732999d683bf75402be14c64bebd"} Dec 06 05:20:07 crc kubenswrapper[4706]: E1206 05:20:07.239125 4706 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.241136 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.254795 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.257246 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-etc-openvswitch\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.257284 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-kubelet\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.257308 4706 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-systemd-units\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.257333 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-ovn-node-metrics-cert\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.257388 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-run-openvswitch\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.257434 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.257463 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-run-ovn-kubernetes\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.257488 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-ovnkube-script-lib\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.257512 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-slash\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.257595 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-run-systemd\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.257736 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-run-netns\") pod \"ovnkube-node-l5xg7\" (UID: 
\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.257825 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-run-ovn\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.257863 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-ovnkube-config\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.257926 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-var-lib-openvswitch\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.257979 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-log-socket\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.258017 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-env-overrides\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.258129 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xwsz\" (UniqueName: \"kubernetes.io/projected/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-kube-api-access-7xwsz\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.258214 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-cni-netd\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.258253 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-node-log\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.258319 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-cni-bin\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.274590 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\
\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":
\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-ap
i-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.360028 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-run-openvswitch\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.360177 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.360248 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-run-ovn-kubernetes\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.360287 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-ovnkube-script-lib\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.360281 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-run-openvswitch\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.360381 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.360330 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-slash\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.360578 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-run-systemd\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.360411 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-slash\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.360692 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-run-systemd\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.360733 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-run-netns\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.360347 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-run-ovn-kubernetes\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.360811 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-run-netns\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.360812 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-run-ovn\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.360874 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-ovnkube-config\") pod \"ovnkube-node-l5xg7\" (UID: 
\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.360898 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-run-ovn\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.360917 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-var-lib-openvswitch\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.360973 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-env-overrides\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.361017 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-log-socket\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.361112 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xwsz\" (UniqueName: \"kubernetes.io/projected/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-kube-api-access-7xwsz\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.361153 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-node-log\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.361190 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-cni-bin\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.361935 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-cni-netd\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.362006 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-ovnkube-script-lib\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc 
kubenswrapper[4706]: I1206 05:20:07.362074 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-etc-openvswitch\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.362140 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-kubelet\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.362223 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-systemd-units\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.362279 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-ovn-node-metrics-cert\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.362646 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-env-overrides\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.362644 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-var-lib-openvswitch\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.362742 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-cni-netd\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.362750 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-log-socket\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.362822 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-cni-bin\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.362821 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" 
(UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-node-log\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.362879 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-kubelet\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.362930 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-etc-openvswitch\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.362938 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-systemd-units\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.363404 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-ovnkube-config\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.368610 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-ovn-node-metrics-cert\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.390909 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xwsz\" (UniqueName: \"kubernetes.io/projected/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-kube-api-access-7xwsz\") pod \"ovnkube-node-l5xg7\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.413795 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.473065 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/87424bac-c58b-4fae-8f44-443e202bf113-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gv2xq\" (UID: \"87424bac-c58b-4fae-8f44-443e202bf113\") " pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:07 crc kubenswrapper[4706]: W1206 05:20:07.493577 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4a3b88d_ed57_4b99_89d1_9a3d1ea8a0a5.slice/crio-1304bcdf035fd08cbc3bba0db2b70df8651a16589109f8a8eb71897668a15fb4 WatchSource:0}: Error finding container 1304bcdf035fd08cbc3bba0db2b70df8651a16589109f8a8eb71897668a15fb4: Status 404 returned error can't find the container with id 1304bcdf035fd08cbc3bba0db2b70df8651a16589109f8a8eb71897668a15fb4 Dec 06 05:20:07 crc kubenswrapper[4706]: W1206 05:20:07.494386 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae6d3c62_ad40_492b_9c35_d0043649cb81.slice/crio-66e2d52cd2c2a4c221ca55cd80d8bb9e6d6c4b9cfdd662413bdfadb53358c63f WatchSource:0}: Error finding container 66e2d52cd2c2a4c221ca55cd80d8bb9e6d6c4b9cfdd662413bdfadb53358c63f: Status 404 returned error can't find the container with id 66e2d52cd2c2a4c221ca55cd80d8bb9e6d6c4b9cfdd662413bdfadb53358c63f Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.663790 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.669159 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:20:07 crc kubenswrapper[4706]: E1206 05:20:07.669316 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:20:09.66929166 +0000 UTC m=+31.997115604 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.669356 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.669435 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 06 05:20:07 crc kubenswrapper[4706]: E1206 05:20:07.669571 4706 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 06 05:20:07 crc kubenswrapper[4706]: E1206 05:20:07.669619 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:09.669612738 +0000 UTC m=+31.997436682 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 06 05:20:07 crc kubenswrapper[4706]: E1206 05:20:07.669709 4706 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Dec 06 05:20:07 crc kubenswrapper[4706]: E1206 05:20:07.669874 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:09.669847505 +0000 UTC m=+31.997671459 (durationBeforeRetry 2s).
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 06 05:20:07 crc kubenswrapper[4706]: W1206 05:20:07.677221 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod87424bac_c58b_4fae_8f44_443e202bf113.slice/crio-6a7b8d7fe1f5de10e066b2ad449ca98d42f433d4f5a567c811d414398cbbde89 WatchSource:0}: Error finding container 6a7b8d7fe1f5de10e066b2ad449ca98d42f433d4f5a567c811d414398cbbde89: Status 404 returned error can't find the container with id 6a7b8d7fe1f5de10e066b2ad449ca98d42f433d4f5a567c811d414398cbbde89 Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.770270 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:07 crc kubenswrapper[4706]: I1206 05:20:07.770311 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:07 crc kubenswrapper[4706]: E1206 05:20:07.770479 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 06 05:20:07 crc kubenswrapper[4706]: E1206 05:20:07.770495 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 06 05:20:07 crc kubenswrapper[4706]: E1206 05:20:07.770508 4706 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:07 crc kubenswrapper[4706]: E1206 05:20:07.770569 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:09.770554881 +0000 UTC m=+32.098378825 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:07 crc kubenswrapper[4706]: E1206 05:20:07.772058 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 06 05:20:07 crc kubenswrapper[4706]: E1206 05:20:07.772325 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 06 05:20:07 crc kubenswrapper[4706]: E1206 05:20:07.772347 4706 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:07 crc kubenswrapper[4706]: E1206 05:20:07.772908 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:09.772886363 +0000 UTC m=+32.100710307 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.035416 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.035424 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.035463 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:08 crc kubenswrapper[4706]: E1206 05:20:08.035598 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:08 crc kubenswrapper[4706]: E1206 05:20:08.035725 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:08 crc kubenswrapper[4706]: E1206 05:20:08.035901 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.040670 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.041426 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.043425 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.044432 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.045940 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.046696 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.047719 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.049074 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.049890 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.051147 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.051793 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.053016 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.053496 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.054256 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.054981 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.056288 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.057013 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" 
path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.058378 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.058948 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.059862 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.061989 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.062696 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.064165 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.064739 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.065094 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.066964 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.067732 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.068719 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.070572 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.071510 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.073292 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.074185 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.075684 4706 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.075862 4706 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.077476 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is 
not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.078518 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.080345 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.081035 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.084335 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.085325 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.086379 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.087025 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.088244 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.088748 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.089731 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.090525 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.091772 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.092664 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.094175 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" 
path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.094856 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.096109 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.096251 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92eda
f5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.096653 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.097561 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.098187 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.098737 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.099730 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.100257 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.114257 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.137503 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.152451 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.164010 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.179672 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.197470 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.216213 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.237728 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba"} Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.237778 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" 
event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac"} Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.239492 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-zct8k" event={"ID":"08344ca0-306d-4ff1-81eb-cb9d32a4230a","Type":"ContainerStarted","Data":"187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9"} Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.240955 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9"} Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.242733 4706 generic.go:334] "Generic (PLEG): container finished" podID="87424bac-c58b-4fae-8f44-443e202bf113" containerID="34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981" exitCode=0 Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.242781 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" event={"ID":"87424bac-c58b-4fae-8f44-443e202bf113","Type":"ContainerDied","Data":"34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981"} Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.242798 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" event={"ID":"87424bac-c58b-4fae-8f44-443e202bf113","Type":"ContainerStarted","Data":"6a7b8d7fe1f5de10e066b2ad449ca98d42f433d4f5a567c811d414398cbbde89"} Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.245931 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.248195 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24"} Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.248938 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.250261 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rtxrp" event={"ID":"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5","Type":"ContainerStarted","Data":"c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920"} Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.250295 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rtxrp" event={"ID":"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5","Type":"ContainerStarted","Data":"1304bcdf035fd08cbc3bba0db2b70df8651a16589109f8a8eb71897668a15fb4"} Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.252793 4706 generic.go:334] "Generic (PLEG): container finished" podID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerID="28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b" exitCode=0 Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.252846 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" 
event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerDied","Data":"28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b"} Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.252867 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerStarted","Data":"e88ce5555f9b3f6c0c60cb5a3aa1d1ce51ec2f33b9c49b87e84d51b52cdfd55f"} Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.255334 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054"} Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.255365 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e"} Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.255376 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"66e2d52cd2c2a4c221ca55cd80d8bb9e6d6c4b9cfdd662413bdfadb53358c63f"} Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.258012 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.282510 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.321996 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.344734 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.362127 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.380750 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.401852 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.423884 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.443675 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.475061 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.497398 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.520610 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc 
kubenswrapper[4706]: I1206 05:20:08.536128 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.548195 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.570084 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.590894 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-mtbkm"] Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.591533 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-mtbkm" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.593144 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.594235 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.594432 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.594653 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.607882 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.620905 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.639645 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z 
is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.659814 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.678857 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.690835 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.702210 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.715615 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.728007 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.748647 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.782358 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-886hm\" (UniqueName: \"kubernetes.io/projected/4afb28f7-2b2c-4ca3-bf32-30f314fa6d13-kube-api-access-886hm\") pod \"node-ca-mtbkm\" (UID: \"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\") " pod="openshift-image-registry/node-ca-mtbkm" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.782607 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4afb28f7-2b2c-4ca3-bf32-30f314fa6d13-host\") pod \"node-ca-mtbkm\" (UID: \"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\") " pod="openshift-image-registry/node-ca-mtbkm" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.782695 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/4afb28f7-2b2c-4ca3-bf32-30f314fa6d13-serviceca\") pod \"node-ca-mtbkm\" (UID: \"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\") " pod="openshift-image-registry/node-ca-mtbkm" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.789374 4706 
status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.830784 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.870426 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc 
kubenswrapper[4706]: I1206 05:20:08.883457 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4afb28f7-2b2c-4ca3-bf32-30f314fa6d13-host\") pod \"node-ca-mtbkm\" (UID: \"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\") " pod="openshift-image-registry/node-ca-mtbkm" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.883638 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/4afb28f7-2b2c-4ca3-bf32-30f314fa6d13-serviceca\") pod \"node-ca-mtbkm\" (UID: \"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\") " pod="openshift-image-registry/node-ca-mtbkm" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.883771 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-886hm\" (UniqueName: \"kubernetes.io/projected/4afb28f7-2b2c-4ca3-bf32-30f314fa6d13-kube-api-access-886hm\") pod \"node-ca-mtbkm\" (UID: \"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\") " pod="openshift-image-registry/node-ca-mtbkm" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.883610 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/4afb28f7-2b2c-4ca3-bf32-30f314fa6d13-host\") pod \"node-ca-mtbkm\" (UID: \"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\") " pod="openshift-image-registry/node-ca-mtbkm" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.885025 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/4afb28f7-2b2c-4ca3-bf32-30f314fa6d13-serviceca\") pod \"node-ca-mtbkm\" (UID: \"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\") " pod="openshift-image-registry/node-ca-mtbkm" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.911530 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:08 crc kubenswrapper[4706]: I1206 05:20:08.941455 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-886hm\" (UniqueName: \"kubernetes.io/projected/4afb28f7-2b2c-4ca3-bf32-30f314fa6d13-kube-api-access-886hm\") pod \"node-ca-mtbkm\" (UID: \"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\") " pod="openshift-image-registry/node-ca-mtbkm" Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.030905 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-mtbkm" Dec 06 05:20:09 crc kubenswrapper[4706]: W1206 05:20:09.050654 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4afb28f7_2b2c_4ca3_bf32_30f314fa6d13.slice/crio-a639b2c5ee95ac608bdacf5dd3d6fa701502d3836655137cae5ba0c893900190 WatchSource:0}: Error finding container a639b2c5ee95ac608bdacf5dd3d6fa701502d3836655137cae5ba0c893900190: Status 404 returned error can't find the container with id a639b2c5ee95ac608bdacf5dd3d6fa701502d3836655137cae5ba0c893900190 Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.265586 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerStarted","Data":"a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab"} Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.266152 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerStarted","Data":"8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7"} Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.266170 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerStarted","Data":"c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842"} Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.268155 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-mtbkm" event={"ID":"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13","Type":"ContainerStarted","Data":"a639b2c5ee95ac608bdacf5dd3d6fa701502d3836655137cae5ba0c893900190"} Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.275021 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" event={"ID":"87424bac-c58b-4fae-8f44-443e202bf113","Type":"ContainerStarted","Data":"45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a"} Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.303883 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:09Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.327688 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cn
i/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:09Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.353116 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:09Z 
is after 2025-08-24T17:21:41Z" Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.366905 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:09Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.382453 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:09Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.399840 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:09Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.444653 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:09Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.468060 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:09Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.503943 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:09Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.541709 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:09Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.587140 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:09Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.612263 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:09Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.628937 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:09Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.644788 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:09Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.691382 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:20:09 crc kubenswrapper[4706]: E1206 05:20:09.691621 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:20:13.691582229 +0000 UTC m=+36.019406213 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.691730 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.691829 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:09 crc kubenswrapper[4706]: E1206 05:20:09.691949 4706 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 06 05:20:09 crc kubenswrapper[4706]: E1206 05:20:09.692069 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:13.692036501 +0000 UTC m=+36.019860445 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 06 05:20:09 crc kubenswrapper[4706]: E1206 05:20:09.692193 4706 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 06 05:20:09 crc kubenswrapper[4706]: E1206 05:20:09.692365 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:13.692336099 +0000 UTC m=+36.020160173 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.792555 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:09 crc kubenswrapper[4706]: I1206 05:20:09.792598 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:09 crc kubenswrapper[4706]: E1206 05:20:09.792741 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 06 05:20:09 crc kubenswrapper[4706]: E1206 05:20:09.792760 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 06 05:20:09 crc kubenswrapper[4706]: E1206 05:20:09.792771 4706 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:09 crc kubenswrapper[4706]: E1206 05:20:09.792807 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 06 05:20:09 crc kubenswrapper[4706]: E1206 05:20:09.792845 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 06 05:20:09 crc kubenswrapper[4706]: E1206 
05:20:09.792861 4706 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:09 crc kubenswrapper[4706]: E1206 05:20:09.792827 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:13.792810249 +0000 UTC m=+36.120634193 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:09 crc kubenswrapper[4706]: E1206 05:20:09.792929 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:13.792917882 +0000 UTC m=+36.120741816 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.035384 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:10 crc kubenswrapper[4706]: E1206 05:20:10.035583 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.035689 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:10 crc kubenswrapper[4706]: E1206 05:20:10.035764 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.035841 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:10 crc kubenswrapper[4706]: E1206 05:20:10.035924 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.281616 4706 generic.go:334] "Generic (PLEG): container finished" podID="87424bac-c58b-4fae-8f44-443e202bf113" containerID="45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a" exitCode=0 Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.281739 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" event={"ID":"87424bac-c58b-4fae-8f44-443e202bf113","Type":"ContainerDied","Data":"45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a"} Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.284135 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89"} Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.289976 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerStarted","Data":"019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6"} Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.290065 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerStarted","Data":"097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92"} Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.290087 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerStarted","Data":"68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7"} Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.292088 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-mtbkm" event={"ID":"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13","Type":"ContainerStarted","Data":"645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721"} Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.308759 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.324856 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.344206 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.357751 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.375754 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"c
ontainerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}
},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.391326 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.411605 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.433863 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.448757 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.467535 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.481661 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.505655 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.520389 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.543256 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z 
is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.559355 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.579975 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z 
is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.592788 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.608003 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.618342 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.633649 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.647584 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca9
2500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.659812 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.673668 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: 
I1206 05:20:10.688556 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.700403 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.713674 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-
06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.722306 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:10 crc kubenswrapper[4706]: I1206 05:20:10.735801 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:10Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.299394 4706 generic.go:334] "Generic (PLEG): container finished" podID="87424bac-c58b-4fae-8f44-443e202bf113" containerID="492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf" exitCode=0 Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.299489 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" event={"ID":"87424bac-c58b-4fae-8f44-443e202bf113","Type":"ContainerDied","Data":"492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf"} Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.317879 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:11Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.331230 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:11Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.346187 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:11Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.361172 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca9
2500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:11Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.379139 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:11Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.392901 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:11Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:11 crc kubenswrapper[4706]: 
I1206 05:20:11.407590 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:11Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.420808 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:11Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.438680 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:11Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.454072 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:11Z is after 
2025-08-24T17:21:41Z" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.471749 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"
started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:11Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.488250 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:11Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.507378 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:11Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.520759 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:11Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.883703 4706 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.885890 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.885948 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.885969 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.886183 4706 kubelet_node_status.go:76] "Attempting to register node" node="crc" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.894177 4706 kubelet_node_status.go:115] "Node was previously registered" node="crc" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.894479 4706 kubelet_node_status.go:79] "Successfully registered node" node="crc" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.895584 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.895656 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.895680 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.895713 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.895775 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:11Z","lastTransitionTime":"2025-12-06T05:20:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:11 crc kubenswrapper[4706]: E1206 05:20:11.911848 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:11Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.917324 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.917405 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.917420 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.917442 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.917457 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:11Z","lastTransitionTime":"2025-12-06T05:20:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:11 crc kubenswrapper[4706]: E1206 05:20:11.934305 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:11Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.938921 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.938983 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.938997 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.939020 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.939036 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:11Z","lastTransitionTime":"2025-12-06T05:20:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:11 crc kubenswrapper[4706]: E1206 05:20:11.958943 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:11Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.964354 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.964413 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.964433 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.964461 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.964481 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:11Z","lastTransitionTime":"2025-12-06T05:20:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:11 crc kubenswrapper[4706]: E1206 05:20:11.981132 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:11Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.986029 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.986124 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.986151 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.986173 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:11 crc kubenswrapper[4706]: I1206 05:20:11.986190 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:11Z","lastTransitionTime":"2025-12-06T05:20:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:12 crc kubenswrapper[4706]: E1206 05:20:12.002431 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:11Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:12 crc kubenswrapper[4706]: E1206 05:20:12.002712 4706 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.004913 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.004953 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.004968 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.004988 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.005002 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:12Z","lastTransitionTime":"2025-12-06T05:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.036137 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.036177 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.036182 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:12 crc kubenswrapper[4706]: E1206 05:20:12.036384 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:12 crc kubenswrapper[4706]: E1206 05:20:12.036536 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:12 crc kubenswrapper[4706]: E1206 05:20:12.036705 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.108559 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.108627 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.108654 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.108690 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.108719 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:12Z","lastTransitionTime":"2025-12-06T05:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.212484 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.212568 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.212599 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.212638 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.212666 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:12Z","lastTransitionTime":"2025-12-06T05:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.306702 4706 generic.go:334] "Generic (PLEG): container finished" podID="87424bac-c58b-4fae-8f44-443e202bf113" containerID="d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7" exitCode=0 Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.306847 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" event={"ID":"87424bac-c58b-4fae-8f44-443e202bf113","Type":"ContainerDied","Data":"d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7"} Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.314713 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.314750 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.314763 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.314783 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.314796 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:12Z","lastTransitionTime":"2025-12-06T05:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.336613 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:12Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.354741 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:12Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.379210 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:12Z 
is after 2025-08-24T17:21:41Z" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.397380 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:12Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.419112 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.419165 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.419184 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.419214 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.419235 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:12Z","lastTransitionTime":"2025-12-06T05:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.420279 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:12Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.464938 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:12Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.483968 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:12Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.503233 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:12Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.523248 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.523322 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.523345 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.523377 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.523400 4706 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:12Z","lastTransitionTime":"2025-12-06T05:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.527807 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:12Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.547460 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:12Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.572915 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:12Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.598538 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:12Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.623097 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:12Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.629436 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.629507 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.629529 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.629562 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.629594 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:12Z","lastTransitionTime":"2025-12-06T05:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.645410 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:12Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.734510 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.734561 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.734577 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.734603 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.734621 4706 setters.go:603] "Node became not 
ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:12Z","lastTransitionTime":"2025-12-06T05:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.838790 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.838848 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.838867 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.838896 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.838916 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:12Z","lastTransitionTime":"2025-12-06T05:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.943963 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.944097 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.944126 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.944160 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:12 crc kubenswrapper[4706]: I1206 05:20:12.944184 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:12Z","lastTransitionTime":"2025-12-06T05:20:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.047828 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.047889 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.047899 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.047921 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.047934 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:13Z","lastTransitionTime":"2025-12-06T05:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.150148 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.150204 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.150219 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.150239 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.150252 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:13Z","lastTransitionTime":"2025-12-06T05:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.252717 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.252752 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.252760 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.252775 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.252785 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:13Z","lastTransitionTime":"2025-12-06T05:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.316120 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerStarted","Data":"b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5"} Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.319835 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" event={"ID":"87424bac-c58b-4fae-8f44-443e202bf113","Type":"ContainerStarted","Data":"cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135"} Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.342470 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:13Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.355256 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.355306 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.355322 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.355346 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.355366 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:13Z","lastTransitionTime":"2025-12-06T05:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.358693 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:13Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.425073 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:13Z 
is after 2025-08-24T17:21:41Z" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.443882 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:13Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.458340 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.458385 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.458397 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.458415 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.458427 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:13Z","lastTransitionTime":"2025-12-06T05:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.461080 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:13Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.474523 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:13Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.488263 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:13Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.505910 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:13Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.523529 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:13Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.540990 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:13Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.555195 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:13Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.561281 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.561341 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.561356 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.561377 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.561391 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:13Z","lastTransitionTime":"2025-12-06T05:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.568789 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:13Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.584982 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9
8100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:13Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.597487 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:13Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.664527 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.664588 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.664608 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.664628 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.664642 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:13Z","lastTransitionTime":"2025-12-06T05:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.737679 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.737846 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.737917 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:13 crc kubenswrapper[4706]: E1206 05:20:13.737994 4706 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 06 05:20:13 crc kubenswrapper[4706]: E1206 05:20:13.738036 4706 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 06 05:20:13 crc kubenswrapper[4706]: E1206 05:20:13.738085 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:20:21.737999955 +0000 UTC m=+44.065823939 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:20:13 crc kubenswrapper[4706]: E1206 05:20:13.738150 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:21.738133599 +0000 UTC m=+44.065957583 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 06 05:20:13 crc kubenswrapper[4706]: E1206 05:20:13.738194 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:21.73818015 +0000 UTC m=+44.066004134 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.767383 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.767430 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.767442 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.767465 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.767480 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:13Z","lastTransitionTime":"2025-12-06T05:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.839712 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.839818 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:13 crc kubenswrapper[4706]: E1206 05:20:13.839981 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 06 05:20:13 crc kubenswrapper[4706]: E1206 05:20:13.840008 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 06 05:20:13 crc kubenswrapper[4706]: E1206 05:20:13.840086 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 06 05:20:13 crc kubenswrapper[4706]: E1206 05:20:13.840107 4706 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:13 crc kubenswrapper[4706]: E1206 05:20:13.840023 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 06 05:20:13 crc kubenswrapper[4706]: E1206 05:20:13.840167 4706 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:13 crc kubenswrapper[4706]: E1206 05:20:13.840180 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:21.840155961 +0000 UTC m=+44.167979945 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:13 crc kubenswrapper[4706]: E1206 05:20:13.840244 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:21.840218112 +0000 UTC m=+44.168042146 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.870388 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.870442 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.870465 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.870491 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.870512 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:13Z","lastTransitionTime":"2025-12-06T05:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.973306 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.973354 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.973371 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.973388 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:13 crc kubenswrapper[4706]: I1206 05:20:13.973400 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:13Z","lastTransitionTime":"2025-12-06T05:20:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.036337 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:14 crc kubenswrapper[4706]: E1206 05:20:14.036588 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.036360 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:14 crc kubenswrapper[4706]: E1206 05:20:14.036798 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.036360 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:14 crc kubenswrapper[4706]: E1206 05:20:14.037085 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.076556 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.076615 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.076629 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.076651 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.076664 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:14Z","lastTransitionTime":"2025-12-06T05:20:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.179999 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.180123 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.180143 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.180173 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.180193 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:14Z","lastTransitionTime":"2025-12-06T05:20:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.285564 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.285675 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.285694 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.285725 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.285745 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:14Z","lastTransitionTime":"2025-12-06T05:20:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.389706 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.389801 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.389821 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.389853 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.389878 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:14Z","lastTransitionTime":"2025-12-06T05:20:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.495416 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.495527 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.495547 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.495578 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.495609 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:14Z","lastTransitionTime":"2025-12-06T05:20:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.606887 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.606966 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.606993 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.607030 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.607097 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:14Z","lastTransitionTime":"2025-12-06T05:20:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.710211 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.710276 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.710295 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.710323 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.710345 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:14Z","lastTransitionTime":"2025-12-06T05:20:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.813219 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.813320 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.813342 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.813369 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.813392 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:14Z","lastTransitionTime":"2025-12-06T05:20:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.916639 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.916704 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.916719 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.916738 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:14 crc kubenswrapper[4706]: I1206 05:20:14.916748 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:14Z","lastTransitionTime":"2025-12-06T05:20:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.020268 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.020336 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.020363 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.020393 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.020416 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:15Z","lastTransitionTime":"2025-12-06T05:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.123652 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.123727 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.123753 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.123783 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.123802 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:15Z","lastTransitionTime":"2025-12-06T05:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.227203 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.227278 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.227302 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.227335 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.227390 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:15Z","lastTransitionTime":"2025-12-06T05:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.330737 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.330873 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.330903 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.330935 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.330960 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:15Z","lastTransitionTime":"2025-12-06T05:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.434960 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.435063 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.435077 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.435100 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.435112 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:15Z","lastTransitionTime":"2025-12-06T05:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.538943 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.539092 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.539133 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.539171 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.539193 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:15Z","lastTransitionTime":"2025-12-06T05:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.641889 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.641957 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.641978 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.642006 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.642024 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:15Z","lastTransitionTime":"2025-12-06T05:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.745092 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.745180 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.745215 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.745248 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.745270 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:15Z","lastTransitionTime":"2025-12-06T05:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.849394 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.849480 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.849508 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.849544 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.849586 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:15Z","lastTransitionTime":"2025-12-06T05:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.953233 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.953320 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.953339 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.953369 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:15 crc kubenswrapper[4706]: I1206 05:20:15.953389 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:15Z","lastTransitionTime":"2025-12-06T05:20:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.035623 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.035681 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.035843 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:16 crc kubenswrapper[4706]: E1206 05:20:16.036041 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:16 crc kubenswrapper[4706]: E1206 05:20:16.036260 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:16 crc kubenswrapper[4706]: E1206 05:20:16.036481 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.056851 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.056897 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.056915 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.056938 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.056956 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:16Z","lastTransitionTime":"2025-12-06T05:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.160500 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.160574 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.160603 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.160636 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.160661 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:16Z","lastTransitionTime":"2025-12-06T05:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.263778 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.264290 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.264305 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.264326 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.264339 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:16Z","lastTransitionTime":"2025-12-06T05:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.367745 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.367868 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.367893 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.367925 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.367953 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:16Z","lastTransitionTime":"2025-12-06T05:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.471406 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.471499 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.471525 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.471557 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.471583 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:16Z","lastTransitionTime":"2025-12-06T05:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.574778 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.574834 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.574848 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.574873 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.574890 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:16Z","lastTransitionTime":"2025-12-06T05:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.678561 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.678640 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.678659 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.678687 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.678706 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:16Z","lastTransitionTime":"2025-12-06T05:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.782623 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.782704 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.782727 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.782762 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.782785 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:16Z","lastTransitionTime":"2025-12-06T05:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.887288 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.887345 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.887360 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.887383 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.887396 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:16Z","lastTransitionTime":"2025-12-06T05:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.991631 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.991736 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.991762 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.991796 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:16 crc kubenswrapper[4706]: I1206 05:20:16.991817 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:16Z","lastTransitionTime":"2025-12-06T05:20:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.095246 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.095318 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.095338 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.095368 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.095388 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:17Z","lastTransitionTime":"2025-12-06T05:20:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.199323 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.199411 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.199437 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.199476 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.199500 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:17Z","lastTransitionTime":"2025-12-06T05:20:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.303710 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.303781 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.303799 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.303829 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.303849 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:17Z","lastTransitionTime":"2025-12-06T05:20:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.407280 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.407373 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.407399 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.407434 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.407461 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:17Z","lastTransitionTime":"2025-12-06T05:20:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.541223 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.541316 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.541342 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.541382 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.541410 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:17Z","lastTransitionTime":"2025-12-06T05:20:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.644251 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.644341 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.644360 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.644391 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.644410 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:17Z","lastTransitionTime":"2025-12-06T05:20:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.677723 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.704610 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\
":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:17Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.725858 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:17Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.745860 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:17Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.747529 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.747586 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.747612 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.747652 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.747677 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:17Z","lastTransitionTime":"2025-12-06T05:20:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.765785 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:17Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.783018 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:17Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.804241 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:17Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.826464 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:17Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.844787 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:17Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.850813 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.850891 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.850911 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.850945 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.850965 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:17Z","lastTransitionTime":"2025-12-06T05:20:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.864560 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:17Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.886005 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:17Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.912055 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:17Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.932430 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:17Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.954405 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.954471 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.954490 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.954517 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.954537 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:17Z","lastTransitionTime":"2025-12-06T05:20:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:17 crc kubenswrapper[4706]: I1206 05:20:17.956005 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:17Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.035562 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.035593 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.035808 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:18 crc kubenswrapper[4706]: E1206 05:20:18.035986 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:18 crc kubenswrapper[4706]: E1206 05:20:18.036757 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:18 crc kubenswrapper[4706]: E1206 05:20:18.036997 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.060680 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.060760 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.060788 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.060820 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.060847 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:18Z","lastTransitionTime":"2025-12-06T05:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.166211 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.166257 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.166268 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.166287 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.166300 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:18Z","lastTransitionTime":"2025-12-06T05:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.168960 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd0
7dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.193796 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d
18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] 
Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.210178 4706 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.223326 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.238131 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.257933 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:18Z is after 2025-08-24T17:21:41Z"
Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.269311 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.269383 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.269413 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.269449 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.269475 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:18Z","lastTransitionTime":"2025-12-06T05:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.277831 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:
20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.297454 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.315025 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.340600 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cn
i/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.363959 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:18Z 
is after 2025-08-24T17:21:41Z" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.372861 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.372902 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.372914 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.372936 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.372949 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:18Z","lastTransitionTime":"2025-12-06T05:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.380365 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\
\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.400032 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.417914 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.438863 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.476643 4706 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.476690 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.476703 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.476723 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.476739 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:18Z","lastTransitionTime":"2025-12-06T05:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.579958 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.580019 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.580037 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.580067 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.580114 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:18Z","lastTransitionTime":"2025-12-06T05:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.683535 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.683795 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.683826 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.683859 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.683883 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:18Z","lastTransitionTime":"2025-12-06T05:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.787209 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.787293 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.787314 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.787343 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.787366 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:18Z","lastTransitionTime":"2025-12-06T05:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.893294 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.893347 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.893358 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.893377 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.893388 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:18Z","lastTransitionTime":"2025-12-06T05:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.997719 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.997784 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.997797 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.997822 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:18 crc kubenswrapper[4706]: I1206 05:20:18.997842 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:18Z","lastTransitionTime":"2025-12-06T05:20:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.101669 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.101729 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.101743 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.101767 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.101783 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:19Z","lastTransitionTime":"2025-12-06T05:20:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.205277 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.205355 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.205374 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.205404 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.205424 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:19Z","lastTransitionTime":"2025-12-06T05:20:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.309485 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.309528 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.309541 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.309562 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.309574 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:19Z","lastTransitionTime":"2025-12-06T05:20:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.413205 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.413294 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.413314 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.413348 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.413367 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:19Z","lastTransitionTime":"2025-12-06T05:20:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.516487 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.516575 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.516601 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.516634 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.516659 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:19Z","lastTransitionTime":"2025-12-06T05:20:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.619768 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.619840 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.619877 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.619910 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.619937 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:19Z","lastTransitionTime":"2025-12-06T05:20:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.683524 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q"]
Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.686647 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q"
Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.692075 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.693595 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.704421 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:19Z is after 2025-08-24T17:21:41Z"
Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.720300 4706
status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-06T05:20:19Z is after 2025-08-24T17:21:41Z"
Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.725559 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.725612 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.725623 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.725641 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.725652 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:19Z","lastTransitionTime":"2025-12-06T05:20:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.738511 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:19Z is after 2025-08-24T17:21:41Z"
Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.754450 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:19Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.765421 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:19Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.774575 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/95f64f73-47d5-4156-8eb5-539fa23b4202-env-overrides\") pod \"ovnkube-control-plane-749d76644c-4k52q\" (UID: \"95f64f73-47d5-4156-8eb5-539fa23b4202\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.774623 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcczw\" (UniqueName: \"kubernetes.io/projected/95f64f73-47d5-4156-8eb5-539fa23b4202-kube-api-access-qcczw\") pod \"ovnkube-control-plane-749d76644c-4k52q\" (UID: \"95f64f73-47d5-4156-8eb5-539fa23b4202\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.774652 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/95f64f73-47d5-4156-8eb5-539fa23b4202-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-4k52q\" (UID: \"95f64f73-47d5-4156-8eb5-539fa23b4202\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.774692 4706 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/95f64f73-47d5-4156-8eb5-539fa23b4202-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-4k52q\" (UID: \"95f64f73-47d5-4156-8eb5-539fa23b4202\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.782039 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:19Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.797272 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:19Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.814940 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly
\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:19Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.825987 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursi
veReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:19Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.827617 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.827660 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.827668 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.827687 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.827697 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:19Z","lastTransitionTime":"2025-12-06T05:20:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.839715 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:19Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.852763 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:19Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.865565 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:19Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.875999 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/95f64f73-47d5-4156-8eb5-539fa23b4202-env-overrides\") pod \"ovnkube-control-plane-749d76644c-4k52q\" (UID: \"95f64f73-47d5-4156-8eb5-539fa23b4202\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.876042 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/95f64f73-47d5-4156-8eb5-539fa23b4202-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-4k52q\" (UID: \"95f64f73-47d5-4156-8eb5-539fa23b4202\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.876090 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qcczw\" (UniqueName: \"kubernetes.io/projected/95f64f73-47d5-4156-8eb5-539fa23b4202-kube-api-access-qcczw\") pod \"ovnkube-control-plane-749d76644c-4k52q\" (UID: \"95f64f73-47d5-4156-8eb5-539fa23b4202\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.876136 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/95f64f73-47d5-4156-8eb5-539fa23b4202-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-4k52q\" (UID: \"95f64f73-47d5-4156-8eb5-539fa23b4202\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.876761 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/95f64f73-47d5-4156-8eb5-539fa23b4202-env-overrides\") pod \"ovnkube-control-plane-749d76644c-4k52q\" (UID: \"95f64f73-47d5-4156-8eb5-539fa23b4202\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.877089 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/95f64f73-47d5-4156-8eb5-539fa23b4202-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-4k52q\" (UID: \"95f64f73-47d5-4156-8eb5-539fa23b4202\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.882375 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/95f64f73-47d5-4156-8eb5-539fa23b4202-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-4k52q\" (UID: \"95f64f73-47d5-4156-8eb5-539fa23b4202\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.889910 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:19Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.894102 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcczw\" (UniqueName: \"kubernetes.io/projected/95f64f73-47d5-4156-8eb5-539fa23b4202-kube-api-access-qcczw\") pod \"ovnkube-control-plane-749d76644c-4k52q\" (UID: \"95f64f73-47d5-4156-8eb5-539fa23b4202\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.909971 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:19Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.925345 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:19Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.931352 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.931387 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.931397 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.931415 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:19 crc kubenswrapper[4706]: I1206 05:20:19.931427 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:19Z","lastTransitionTime":"2025-12-06T05:20:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.034060 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.034120 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.034131 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.034149 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.034162 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:20Z","lastTransitionTime":"2025-12-06T05:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.035310 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.035419 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.035454 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 06 05:20:20 crc kubenswrapper[4706]: E1206 05:20:20.035459 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 06 05:20:20 crc kubenswrapper[4706]: E1206 05:20:20.035641 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.035795 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q"
Dec 06 05:20:20 crc kubenswrapper[4706]: E1206 05:20:20.035843 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.140548 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.140596 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.140605 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.140623 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.140634 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:20Z","lastTransitionTime":"2025-12-06T05:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.244111 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.244160 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.244169 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.244186 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.244200 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:20Z","lastTransitionTime":"2025-12-06T05:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.347087 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.347132 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.347144 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.347162 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.347174 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:20Z","lastTransitionTime":"2025-12-06T05:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.349276 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" event={"ID":"95f64f73-47d5-4156-8eb5-539fa23b4202","Type":"ContainerStarted","Data":"f730568d9ed6aa85fe2d20d0c52510dca372fdbf8b15786825f77317b324124d"}
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.450115 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.450183 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.450202 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.450233 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.450259 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:20Z","lastTransitionTime":"2025-12-06T05:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.553483 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.553570 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.553595 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.553619 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.553639 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:20Z","lastTransitionTime":"2025-12-06T05:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.657422 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.657479 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.657488 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.657522 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.657535 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:20Z","lastTransitionTime":"2025-12-06T05:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.760777 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.760851 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.760867 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.760892 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.760908 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:20Z","lastTransitionTime":"2025-12-06T05:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.863563 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.863605 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.863616 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.863632 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.863643 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:20Z","lastTransitionTime":"2025-12-06T05:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.966649 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.966699 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.966899 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.966920 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:20 crc kubenswrapper[4706]: I1206 05:20:20.966932 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:20Z","lastTransitionTime":"2025-12-06T05:20:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.070790 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.070864 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.070885 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.070915 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.070933 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:21Z","lastTransitionTime":"2025-12-06T05:20:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.174848 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.174945 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.174964 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.174993 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.175015 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:21Z","lastTransitionTime":"2025-12-06T05:20:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.265361 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-4ltjs"]
Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.266119 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs"
Dec 06 05:20:21 crc kubenswrapper[4706]: E1206 05:20:21.266227 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed"
Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.277882 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.277936 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.277958 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.277988 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.278011 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:21Z","lastTransitionTime":"2025-12-06T05:20:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.281295 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:21Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.294400 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:21Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.308103 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:21Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.327488 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:21Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.343697 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:21Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.358959 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:21Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.372000 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:21Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.380612 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.380673 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.380683 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.380701 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.380711 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:21Z","lastTransitionTime":"2025-12-06T05:20:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.385407 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:21Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.401352 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs\") pod \"network-metrics-daemon-4ltjs\" (UID: \"f4065785-c72e-4c45-ab51-ce292be4f2ed\") " pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.401446 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcf4l\" (UniqueName: \"kubernetes.io/projected/f4065785-c72e-4c45-ab51-ce292be4f2ed-kube-api-access-vcf4l\") pod \"network-metrics-daemon-4ltjs\" (UID: \"f4065785-c72e-4c45-ab51-ce292be4f2ed\") " pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.403263 4706 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f6
36130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name
\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:21Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.416957 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:21Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.432821 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:21Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.446030 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:21Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.461680 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:21Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.483763 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:21 crc 
kubenswrapper[4706]: I1206 05:20:21.483835 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.483850 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.483878 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.483896 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:21Z","lastTransitionTime":"2025-12-06T05:20:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.485439 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:21Z 
is after 2025-08-24T17:21:41Z" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.502642 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcf4l\" (UniqueName: \"kubernetes.io/projected/f4065785-c72e-4c45-ab51-ce292be4f2ed-kube-api-access-vcf4l\") pod \"network-metrics-daemon-4ltjs\" (UID: \"f4065785-c72e-4c45-ab51-ce292be4f2ed\") " pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.502693 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs\") pod \"network-metrics-daemon-4ltjs\" (UID: \"f4065785-c72e-4c45-ab51-ce292be4f2ed\") " pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:21 crc kubenswrapper[4706]: E1206 05:20:21.502854 4706 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 06 05:20:21 crc kubenswrapper[4706]: E1206 05:20:21.502915 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs podName:f4065785-c72e-4c45-ab51-ce292be4f2ed nodeName:}" failed. No retries permitted until 2025-12-06 05:20:22.002895324 +0000 UTC m=+44.330719268 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs") pod "network-metrics-daemon-4ltjs" (UID: "f4065785-c72e-4c45-ab51-ce292be4f2ed") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.506206 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:21Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.519959 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcf4l\" (UniqueName: \"kubernetes.io/projected/f4065785-c72e-4c45-ab51-ce292be4f2ed-kube-api-access-vcf4l\") pod \"network-metrics-daemon-4ltjs\" (UID: \"f4065785-c72e-4c45-ab51-ce292be4f2ed\") " pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.533757 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"na
me\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:21Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.586476 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.586525 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.586538 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.586559 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.586574 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:21Z","lastTransitionTime":"2025-12-06T05:20:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.689486 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.689525 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.689535 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.689557 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.689571 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:21Z","lastTransitionTime":"2025-12-06T05:20:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.793014 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.793098 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.793138 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.793161 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.793171 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:21Z","lastTransitionTime":"2025-12-06T05:20:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.806534 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.806688 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.806732 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:21 crc kubenswrapper[4706]: E1206 05:20:21.806828 4706 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 06 05:20:21 crc kubenswrapper[4706]: E1206 05:20:21.806887 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:37.806869222 +0000 UTC m=+60.134693166 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 06 05:20:21 crc kubenswrapper[4706]: E1206 05:20:21.807137 4706 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 06 05:20:21 crc kubenswrapper[4706]: E1206 05:20:21.807276 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:37.807248972 +0000 UTC m=+60.135072916 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 06 05:20:21 crc kubenswrapper[4706]: E1206 05:20:21.807647 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-12-06 05:20:37.807601531 +0000 UTC m=+60.135425475 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.896783 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.896832 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.896842 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.896860 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.896873 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:21Z","lastTransitionTime":"2025-12-06T05:20:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.907357 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:21 crc kubenswrapper[4706]: I1206 05:20:21.907397 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:21 crc kubenswrapper[4706]: E1206 05:20:21.907524 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 06 05:20:21 crc kubenswrapper[4706]: E1206 05:20:21.907541 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 06 05:20:21 crc kubenswrapper[4706]: E1206 05:20:21.907551 4706 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:21 crc kubenswrapper[4706]: E1206 05:20:21.907590 4706 nestedpendingoperations.go:348] 
Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:37.907575829 +0000 UTC m=+60.235399773 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:21 crc kubenswrapper[4706]: E1206 05:20:21.907525 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 06 05:20:21 crc kubenswrapper[4706]: E1206 05:20:21.907629 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 06 05:20:21 crc kubenswrapper[4706]: E1206 05:20:21.907640 4706 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:21 crc kubenswrapper[4706]: E1206 05:20:21.907667 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-06 05:20:37.907658601 +0000 UTC m=+60.235482545 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.000145 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.000206 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.000228 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.000262 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.000287 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:22Z","lastTransitionTime":"2025-12-06T05:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.008106 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs\") pod \"network-metrics-daemon-4ltjs\" (UID: \"f4065785-c72e-4c45-ab51-ce292be4f2ed\") " pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:22 crc kubenswrapper[4706]: E1206 05:20:22.008388 4706 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 06 05:20:22 crc kubenswrapper[4706]: E1206 05:20:22.008503 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs podName:f4065785-c72e-4c45-ab51-ce292be4f2ed nodeName:}" failed. No retries permitted until 2025-12-06 05:20:23.00846909 +0000 UTC m=+45.336293074 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs") pod "network-metrics-daemon-4ltjs" (UID: "f4065785-c72e-4c45-ab51-ce292be4f2ed") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.014961 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.015035 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.015094 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.015133 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.015158 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:22Z","lastTransitionTime":"2025-12-06T05:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.036241 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:22 crc kubenswrapper[4706]: E1206 05:20:22.036475 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.036250 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.036241 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:22 crc kubenswrapper[4706]: E1206 05:20:22.036700 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:22 crc kubenswrapper[4706]: E1206 05:20:22.037038 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:22 crc kubenswrapper[4706]: E1206 05:20:22.037355 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.043570 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.043635 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.043662 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.043692 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.043715 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:22Z","lastTransitionTime":"2025-12-06T05:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:22 crc kubenswrapper[4706]: E1206 05:20:22.066612 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.072017 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.072122 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.072150 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.072182 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.072202 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:22Z","lastTransitionTime":"2025-12-06T05:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:22 crc kubenswrapper[4706]: E1206 05:20:22.093380 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.098579 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.098643 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.098663 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.098688 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.098706 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:22Z","lastTransitionTime":"2025-12-06T05:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:22 crc kubenswrapper[4706]: E1206 05:20:22.120071 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.126164 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.126235 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.126257 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.126283 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.126303 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:22Z","lastTransitionTime":"2025-12-06T05:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:22 crc kubenswrapper[4706]: E1206 05:20:22.147469 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: E1206 05:20:22.147709 4706 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.149938 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.149993 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.150011 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.150033 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.150092 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:22Z","lastTransitionTime":"2025-12-06T05:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.253325 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.253413 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.253438 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.253469 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.253529 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:22Z","lastTransitionTime":"2025-12-06T05:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.356531 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.356612 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.356696 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.356737 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.356760 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:22Z","lastTransitionTime":"2025-12-06T05:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.365366 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerStarted","Data":"5fc0492facbe5fcaf26a38cbf0d8fc33478e728f67001f9f121eed30a8807396"}
Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.366019 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7"
Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.366248 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7"
Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.375257 4706 generic.go:334] "Generic (PLEG): container finished" podID="87424bac-c58b-4fae-8f44-443e202bf113" containerID="cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135" exitCode=0
Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.375425 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" event={"ID":"87424bac-c58b-4fae-8f44-443e202bf113","Type":"ContainerDied","Data":"cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135"}
Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.383934 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" event={"ID":"95f64f73-47d5-4156-8eb5-539fa23b4202","Type":"ContainerStarted","Data":"418fbe98ebc093b9ab0d935b32f34d145705cbcde20714629877fcca90111b22"}
Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.391204 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted.
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.439336 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.439425 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.439815 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir
-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.459848 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.459899 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.459915 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.459938 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.459956 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:22Z","lastTransitionTime":"2025-12-06T05:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.473321 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}
,{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"i
mageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fc0492facbe5fcaf26a38cbf0d8fc33478e728f67001f9f121eed30a8807396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":
\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.493672 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.516898 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.532846 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.549002 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.562812 4706 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.562871 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.562889 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.562917 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.562937 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:22Z","lastTransitionTime":"2025-12-06T05:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.565626 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.586245 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.604518 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.622633 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.638196 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.656318 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.666030 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.666198 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.666218 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.666248 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.666268 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:22Z","lastTransitionTime":"2025-12-06T05:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.677135 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.699445 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9
8100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.714499 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.732242 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.750535 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.769059 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.776325 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.776369 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.776384 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.776404 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.776415 4706 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:22Z","lastTransitionTime":"2025-12-06T05:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.790626 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":
\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.805233 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.819436 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.834439 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.845903 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.857803 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.871043 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.881997 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.882026 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.882036 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.882073 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.882086 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:22Z","lastTransitionTime":"2025-12-06T05:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.887404 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqt
ks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.897047 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.909858 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.923971 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.935635 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cn
i/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.961896 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fc0492facbe5fcaf26a38cbf0d8fc33478e728f
67001f9f121eed30a8807396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:22Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.984733 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.984770 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.984781 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.984800 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:22 crc kubenswrapper[4706]: I1206 05:20:22.984812 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:22Z","lastTransitionTime":"2025-12-06T05:20:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.022265 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs\") pod \"network-metrics-daemon-4ltjs\" (UID: \"f4065785-c72e-4c45-ab51-ce292be4f2ed\") " pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:23 crc kubenswrapper[4706]: E1206 05:20:23.022486 4706 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 06 05:20:23 crc kubenswrapper[4706]: E1206 05:20:23.022574 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs podName:f4065785-c72e-4c45-ab51-ce292be4f2ed nodeName:}" failed. No retries permitted until 2025-12-06 05:20:25.022549147 +0000 UTC m=+47.350373091 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs") pod "network-metrics-daemon-4ltjs" (UID: "f4065785-c72e-4c45-ab51-ce292be4f2ed") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.035742 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:23 crc kubenswrapper[4706]: E1206 05:20:23.035888 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.088758 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.088835 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.088856 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.088890 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.088918 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:23Z","lastTransitionTime":"2025-12-06T05:20:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.193644 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.193740 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.193764 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.193798 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.193823 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:23Z","lastTransitionTime":"2025-12-06T05:20:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.296800 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.296871 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.296889 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.296914 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.296934 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:23Z","lastTransitionTime":"2025-12-06T05:20:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.418614 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.418681 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.418690 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.418707 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.418718 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:23Z","lastTransitionTime":"2025-12-06T05:20:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.419976 4706 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.522154 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.522215 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.522228 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.522252 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.522271 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:23Z","lastTransitionTime":"2025-12-06T05:20:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.625840 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.625917 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.625944 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.625984 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.626010 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:23Z","lastTransitionTime":"2025-12-06T05:20:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.729699 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.729751 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.729766 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.729790 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.729804 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:23Z","lastTransitionTime":"2025-12-06T05:20:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.833090 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.833409 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.833538 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.833569 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.833587 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:23Z","lastTransitionTime":"2025-12-06T05:20:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.937356 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.937418 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.937435 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.937460 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:23 crc kubenswrapper[4706]: I1206 05:20:23.937477 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:23Z","lastTransitionTime":"2025-12-06T05:20:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.035839 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.035916 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:24 crc kubenswrapper[4706]: E1206 05:20:24.036089 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.036142 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:24 crc kubenswrapper[4706]: E1206 05:20:24.036223 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:24 crc kubenswrapper[4706]: E1206 05:20:24.036351 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.041524 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.041585 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.041597 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.041683 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.041707 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:24Z","lastTransitionTime":"2025-12-06T05:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.145780 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.145884 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.145905 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.145934 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.145953 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:24Z","lastTransitionTime":"2025-12-06T05:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.249387 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.249439 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.249454 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.249479 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.249500 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:24Z","lastTransitionTime":"2025-12-06T05:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.352817 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.352867 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.352881 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.352903 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.352919 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:24Z","lastTransitionTime":"2025-12-06T05:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.425335 4706 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.456641 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.456731 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.456754 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.456789 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.456814 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:24Z","lastTransitionTime":"2025-12-06T05:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.559863 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.559903 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.559913 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.559931 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.559943 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:24Z","lastTransitionTime":"2025-12-06T05:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.662682 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.662782 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.662796 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.662816 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.662828 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:24Z","lastTransitionTime":"2025-12-06T05:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.765886 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.765957 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.766414 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.766475 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.766494 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:24Z","lastTransitionTime":"2025-12-06T05:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.868951 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.868995 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.869006 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.869024 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.869039 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:24Z","lastTransitionTime":"2025-12-06T05:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.972274 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.972336 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.972350 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.972374 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:24 crc kubenswrapper[4706]: I1206 05:20:24.972391 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:24Z","lastTransitionTime":"2025-12-06T05:20:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.035272 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:25 crc kubenswrapper[4706]: E1206 05:20:25.035491 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.046414 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs\") pod \"network-metrics-daemon-4ltjs\" (UID: \"f4065785-c72e-4c45-ab51-ce292be4f2ed\") " pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:25 crc kubenswrapper[4706]: E1206 05:20:25.046737 4706 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 06 05:20:25 crc kubenswrapper[4706]: E1206 05:20:25.046889 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs podName:f4065785-c72e-4c45-ab51-ce292be4f2ed nodeName:}" failed. No retries permitted until 2025-12-06 05:20:29.046850949 +0000 UTC m=+51.374674923 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs") pod "network-metrics-daemon-4ltjs" (UID: "f4065785-c72e-4c45-ab51-ce292be4f2ed") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.075927 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.076005 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.076029 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.076109 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.076139 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:25Z","lastTransitionTime":"2025-12-06T05:20:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.180497 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.180548 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.180562 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.180584 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.180599 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:25Z","lastTransitionTime":"2025-12-06T05:20:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.236082 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.284807 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.285582 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.285643 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.285694 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.285718 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:25Z","lastTransitionTime":"2025-12-06T05:20:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.389320 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.389416 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.389441 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.389475 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.389502 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:25Z","lastTransitionTime":"2025-12-06T05:20:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.446094 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" event={"ID":"87424bac-c58b-4fae-8f44-443e202bf113","Type":"ContainerStarted","Data":"c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362"} Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.493438 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.493527 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.493546 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.493580 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.493643 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:25Z","lastTransitionTime":"2025-12-06T05:20:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.597336 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.597475 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.597553 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.597589 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.597609 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:25Z","lastTransitionTime":"2025-12-06T05:20:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.700746 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.700811 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.700831 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.700859 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.700878 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:25Z","lastTransitionTime":"2025-12-06T05:20:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.804845 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.804906 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.804931 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.804959 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.804978 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:25Z","lastTransitionTime":"2025-12-06T05:20:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.907838 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.907904 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.907922 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.907951 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:25 crc kubenswrapper[4706]: I1206 05:20:25.907974 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:25Z","lastTransitionTime":"2025-12-06T05:20:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.011246 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.011302 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.011320 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.011344 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.011361 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:26Z","lastTransitionTime":"2025-12-06T05:20:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.035926 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.035926 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.036125 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:26 crc kubenswrapper[4706]: E1206 05:20:26.036192 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:26 crc kubenswrapper[4706]: E1206 05:20:26.036295 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:26 crc kubenswrapper[4706]: E1206 05:20:26.036433 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.114568 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.114630 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.114649 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.114674 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.114731 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:26Z","lastTransitionTime":"2025-12-06T05:20:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.217932 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.217985 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.218003 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.218028 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.218046 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:26Z","lastTransitionTime":"2025-12-06T05:20:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.321493 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.321580 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.321605 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.321640 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.321662 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:26Z","lastTransitionTime":"2025-12-06T05:20:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.426308 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.426411 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.426436 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.426471 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.426494 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:26Z","lastTransitionTime":"2025-12-06T05:20:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.529739 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.530281 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.530497 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.530686 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.530894 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:26Z","lastTransitionTime":"2025-12-06T05:20:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.634966 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.636012 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.636414 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.636840 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.637286 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:26Z","lastTransitionTime":"2025-12-06T05:20:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.741461 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.741528 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.741547 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.741635 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.741714 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:26Z","lastTransitionTime":"2025-12-06T05:20:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.845573 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.845639 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.845658 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.845685 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.845705 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:26Z","lastTransitionTime":"2025-12-06T05:20:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.948896 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.948965 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.948984 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.949013 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:26 crc kubenswrapper[4706]: I1206 05:20:26.949033 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:26Z","lastTransitionTime":"2025-12-06T05:20:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.036349 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:27 crc kubenswrapper[4706]: E1206 05:20:27.036661 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.052926 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.053011 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.053037 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.053110 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.053146 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:27Z","lastTransitionTime":"2025-12-06T05:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.155747 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.155796 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.155807 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.155825 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.155838 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:27Z","lastTransitionTime":"2025-12-06T05:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.259434 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.259509 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.259529 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.259554 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.259570 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:27Z","lastTransitionTime":"2025-12-06T05:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.362876 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.362939 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.362951 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.362973 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.362991 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:27Z","lastTransitionTime":"2025-12-06T05:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.465434 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.465512 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.465532 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.465561 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.465581 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:27Z","lastTransitionTime":"2025-12-06T05:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.569227 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.569284 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.569297 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.569319 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.569334 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:27Z","lastTransitionTime":"2025-12-06T05:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.673139 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.673187 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.673203 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.673229 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.673247 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:27Z","lastTransitionTime":"2025-12-06T05:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.776282 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.776366 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.776388 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.776426 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.776454 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:27Z","lastTransitionTime":"2025-12-06T05:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.880333 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.880436 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.880456 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.880486 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.880508 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:27Z","lastTransitionTime":"2025-12-06T05:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.983949 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.984022 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.984085 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.984124 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:27 crc kubenswrapper[4706]: I1206 05:20:27.984149 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:27Z","lastTransitionTime":"2025-12-06T05:20:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.036081 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.036287 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:28 crc kubenswrapper[4706]: E1206 05:20:28.036554 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:28 crc kubenswrapper[4706]: E1206 05:20:28.036710 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.036708 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:28 crc kubenswrapper[4706]: E1206 05:20:28.036899 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.056472 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.077365 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:28 crc 
kubenswrapper[4706]: I1206 05:20:28.087619 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.087683 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.087701 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.087727 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.087745 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:28Z","lastTransitionTime":"2025-12-06T05:20:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.098429 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\
\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.122351 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.140861 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.162846 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.176849 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.190704 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.190750 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.190763 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.190780 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.190792 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:28Z","lastTransitionTime":"2025-12-06T05:20:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.197252 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqt
ks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.208022 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.227862 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.241973 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.253618 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.264422 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.276597 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.289607 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cn
i/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.293950 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.294097 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.294119 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.294149 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.294169 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:28Z","lastTransitionTime":"2025-12-06T05:20:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.309391 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fc0492facbe5fcaf26a38cbf0d8fc33478e728f67001f9f121eed30a8807396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:28Z is after 2025-08-24T17:21:41Z"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.397498 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.397596 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.397624 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.397659 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.397684 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:28Z","lastTransitionTime":"2025-12-06T05:20:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.499809 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.499857 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.499868 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.499885 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.499898 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:28Z","lastTransitionTime":"2025-12-06T05:20:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.603457 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.603527 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.603545 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.603571 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.603589 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:28Z","lastTransitionTime":"2025-12-06T05:20:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
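The err="failed to patch status ..." payloads quoted above are strategic-merge-patch bodies: the $setElementOrder/conditions directive pins the ordering of the conditions list while only the changed entries travel with the patch. A minimal sketch of that shape (the uid and keys are copied from the log entry; the values are trimmed for illustration):

    // status_patch_shape.go -- illustrative only, not the kubelet's own code path.
    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    func main() {
    	patch := map[string]any{
    		"metadata": map[string]any{"uid": "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"},
    		"status": map[string]any{
    			// Order hint for the server-side merge of the conditions list.
    			"$setElementOrder/conditions": []map[string]string{
    				{"type": "PodReadyToStartContainers"}, {"type": "Initialized"},
    				{"type": "Ready"}, {"type": "ContainersReady"}, {"type": "PodScheduled"},
    			},
    			// Only the conditions that changed ride along with the order hint.
    			"conditions": []map[string]any{{
    				"type": "Ready", "status": "False", "reason": "ContainersNotReady",
    				"message": "containers with unready status: [ovnkube-controller]",
    			}},
    		},
    	}
    	body, err := json.Marshal(patch)
    	if err != nil {
    		fmt.Println("marshal:", err)
    		return
    	}
    	fmt.Println(string(body))
    }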
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.707248 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.707317 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.707334 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.707364 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.707384 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:28Z","lastTransitionTime":"2025-12-06T05:20:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.810487 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.810558 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.810576 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.810603 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.810622 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:28Z","lastTransitionTime":"2025-12-06T05:20:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.914695 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.914770 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.914790 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.914814 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:28 crc kubenswrapper[4706]: I1206 05:20:28.914833 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:28Z","lastTransitionTime":"2025-12-06T05:20:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
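The NodeNotReady heartbeats repeat roughly every 100ms because the runtime network check keeps failing for the same reason: /etc/kubernetes/cni/net.d/ holds no CNI config yet (the ovnkube-controller container above is still not ready, and it is what eventually writes that config). A rough approximation of the check, assumed here rather than lifted from kubelet source:

    // cni_config_probe.go -- an approximation of the readiness test behind
    // "no CNI configuration file in /etc/kubernetes/cni/net.d/".
    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    	"strings"
    )

    func main() {
    	dir := "/etc/kubernetes/cni/net.d" // directory named in the log message
    	entries, err := os.ReadDir(dir)
    	if err != nil {
    		fmt.Println("NetworkPluginNotReady:", err)
    		return
    	}
    	for _, e := range entries {
    		// Extensions commonly accepted by CNI config loaders.
    		switch strings.ToLower(filepath.Ext(e.Name())) {
    		case ".conf", ".conflist", ".json":
    			fmt.Println("NetworkReady: found", filepath.Join(dir, e.Name()))
    			return
    		}
    	}
    	fmt.Println("NetworkPluginNotReady: no CNI configuration file in", dir)
    }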
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.017883 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.017956 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.017975 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.018005 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.018024 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:29Z","lastTransitionTime":"2025-12-06T05:20:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.035501 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs"
Dec 06 05:20:29 crc kubenswrapper[4706]: E1206 05:20:29.035727 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.098591 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs\") pod \"network-metrics-daemon-4ltjs\" (UID: \"f4065785-c72e-4c45-ab51-ce292be4f2ed\") " pod="openshift-multus/network-metrics-daemon-4ltjs"
Dec 06 05:20:29 crc kubenswrapper[4706]: E1206 05:20:29.098885 4706 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Dec 06 05:20:29 crc kubenswrapper[4706]: E1206 05:20:29.099014 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs podName:f4065785-c72e-4c45-ab51-ce292be4f2ed nodeName:}" failed. No retries permitted until 2025-12-06 05:20:37.098981529 +0000 UTC m=+59.426805513 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs") pod "network-metrics-daemon-4ltjs" (UID: "f4065785-c72e-4c45-ab51-ce292be4f2ed") : object "openshift-multus"/"metrics-daemon-secret" not registered
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.121489 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.121564 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.121585 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.121611 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.121637 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:29Z","lastTransitionTime":"2025-12-06T05:20:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.225141 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.225232 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.225261 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.225296 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.225322 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:29Z","lastTransitionTime":"2025-12-06T05:20:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
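The mount failure above is requeued with an 8s holdoff ("durationBeforeRetry 8s", next attempt at 05:20:37.098), and the holdoff roughly doubles on each consecutive failure of the same volume up to a cap. A toy model of that pattern; the seed and cap values here are assumptions for illustration, only the doubling is taken from the log:

    // mount_backoff.go -- toy model of the doubling retry holdoff.
    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	delay := 2 * time.Second         // assumed initial holdoff
    	const maxDelay = 2 * time.Minute // assumed cap
    	for attempt := 1; attempt <= 6; attempt++ {
    		fmt.Printf("attempt %d failed; no retries permitted for %s\n", attempt, delay)
    		delay *= 2
    		if delay > maxDelay {
    			delay = maxDelay
    		}
    	}
    }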
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.329604 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.329682 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.329710 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.329746 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.329766 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:29Z","lastTransitionTime":"2025-12-06T05:20:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.433320 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.433394 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.433412 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.433441 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.433463 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:29Z","lastTransitionTime":"2025-12-06T05:20:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.537194 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.537270 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.537294 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.537329 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.537354 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:29Z","lastTransitionTime":"2025-12-06T05:20:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.640020 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.640094 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.640110 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.640153 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.640172 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:29Z","lastTransitionTime":"2025-12-06T05:20:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.744182 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.744241 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.744264 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.744294 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.744315 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:29Z","lastTransitionTime":"2025-12-06T05:20:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.848488 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.848599 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.848618 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.848643 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.848664 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:29Z","lastTransitionTime":"2025-12-06T05:20:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.952727 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.952794 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.952812 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.952839 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:29 crc kubenswrapper[4706]: I1206 05:20:29.952859 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:29Z","lastTransitionTime":"2025-12-06T05:20:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.035404 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 06 05:20:30 crc kubenswrapper[4706]: E1206 05:20:30.035626 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.036331 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 06 05:20:30 crc kubenswrapper[4706]: E1206 05:20:30.036473 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.036723 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 06 05:20:30 crc kubenswrapper[4706]: E1206 05:20:30.037034 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.056367 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.056460 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.056482 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.056515 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.056535 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:30Z","lastTransitionTime":"2025-12-06T05:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.158756 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.158835 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.158848 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.158867 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.158881 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:30Z","lastTransitionTime":"2025-12-06T05:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
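With five near-identical heartbeat entries landing per status loop, it is easier to machine-read the condition={...} payloads than to eyeball them. A small sketch that parses one such payload into a typed struct (field names mirror the JSON keys printed by setters.go:603; the sample message is abbreviated):

    // condition_grep.go -- helper sketch for reading logs like this one.
    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type nodeCondition struct {
    	Type               string `json:"type"`
    	Status             string `json:"status"`
    	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
    	LastTransitionTime string `json:"lastTransitionTime"`
    	Reason             string `json:"reason"`
    	Message            string `json:"message"`
    }

    func main() {
    	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:30Z","lastTransitionTime":"2025-12-06T05:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready"}`
    	var c nodeCondition
    	if err := json.Unmarshal([]byte(raw), &c); err != nil {
    		fmt.Println("unmarshal:", err)
    		return
    	}
    	fmt.Printf("%s=%s reason=%s since=%s\n", c.Type, c.Status, c.Reason, c.LastTransitionTime)
    }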
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.262798 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.262847 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.262857 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.262878 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.262889 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:30Z","lastTransitionTime":"2025-12-06T05:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.367193 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.367252 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.367269 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.367298 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.367317 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:30Z","lastTransitionTime":"2025-12-06T05:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.479437 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.479507 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.479535 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.479571 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.479597 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:30Z","lastTransitionTime":"2025-12-06T05:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.583313 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.583380 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.583399 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.583428 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.583452 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:30Z","lastTransitionTime":"2025-12-06T05:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.686778 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.686857 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.686877 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.686905 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.686925 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:30Z","lastTransitionTime":"2025-12-06T05:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.789450 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.789503 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.789520 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.789546 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.789564 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:30Z","lastTransitionTime":"2025-12-06T05:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.893211 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.893627 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.893663 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.893694 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.893716 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:30Z","lastTransitionTime":"2025-12-06T05:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.996989 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.997074 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.997094 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.997120 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:30 crc kubenswrapper[4706]: I1206 05:20:30.997141 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:30Z","lastTransitionTime":"2025-12-06T05:20:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.035722 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs"
Dec 06 05:20:31 crc kubenswrapper[4706]: E1206 05:20:31.035913 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.100543 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.100592 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.100610 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.100636 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.100654 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:31Z","lastTransitionTime":"2025-12-06T05:20:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.204967 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.205076 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.205095 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.205125 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.205145 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:31Z","lastTransitionTime":"2025-12-06T05:20:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.309423 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.309500 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.309528 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.309571 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.309593 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:31Z","lastTransitionTime":"2025-12-06T05:20:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.413026 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.413176 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.413214 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.413426 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.413639 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:31Z","lastTransitionTime":"2025-12-06T05:20:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.518805 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.518898 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.518919 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.518947 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.518968 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:31Z","lastTransitionTime":"2025-12-06T05:20:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.622699 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.622779 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.622804 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.622835 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.622854 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:31Z","lastTransitionTime":"2025-12-06T05:20:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.726620 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.726711 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.726763 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.726791 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.726808 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:31Z","lastTransitionTime":"2025-12-06T05:20:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.830589 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.830645 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.830661 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.830686 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.830704 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:31Z","lastTransitionTime":"2025-12-06T05:20:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.933837 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.933909 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.933932 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.933970 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:31 crc kubenswrapper[4706]: I1206 05:20:31.933995 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:31Z","lastTransitionTime":"2025-12-06T05:20:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.036591 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.036591 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.036772 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:32 crc kubenswrapper[4706]: E1206 05:20:32.037002 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:32 crc kubenswrapper[4706]: E1206 05:20:32.037279 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:32 crc kubenswrapper[4706]: E1206 05:20:32.037451 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.038414 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.038476 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.038495 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.038522 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.038541 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:32Z","lastTransitionTime":"2025-12-06T05:20:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.141711 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.141792 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.141816 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.141852 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.141874 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:32Z","lastTransitionTime":"2025-12-06T05:20:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.240093 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.240161 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.240178 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.240203 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.240222 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:32Z","lastTransitionTime":"2025-12-06T05:20:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:32 crc kubenswrapper[4706]: E1206 05:20:32.262743 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.268464 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.268545 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.268564 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.268647 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.268667 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:32Z","lastTransitionTime":"2025-12-06T05:20:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:32 crc kubenswrapper[4706]: E1206 05:20:32.292507 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.298345 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.298387 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.298402 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.298424 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.298440 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:32Z","lastTransitionTime":"2025-12-06T05:20:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:32 crc kubenswrapper[4706]: E1206 05:20:32.322839 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.329952 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.330012 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.330032 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.330094 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.330117 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:32Z","lastTransitionTime":"2025-12-06T05:20:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:32 crc kubenswrapper[4706]: E1206 05:20:32.355489 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.369442 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.369530 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.369553 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.369586 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.369609 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:32Z","lastTransitionTime":"2025-12-06T05:20:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:32 crc kubenswrapper[4706]: E1206 05:20:32.389608 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:32 crc kubenswrapper[4706]: E1206 05:20:32.389871 4706 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.392448 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.392498 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.392515 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.392537 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.392553 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:32Z","lastTransitionTime":"2025-12-06T05:20:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.495667 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.495728 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.495745 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.495774 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.495799 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:32Z","lastTransitionTime":"2025-12-06T05:20:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.573604 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.587979 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.597371 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.598710 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.598800 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.598851 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.598879 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.598900 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:32Z","lastTransitionTime":"2025-12-06T05:20:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.615687 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqt
ks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.630524 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.690343 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.702095 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.702416 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.702679 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.702917 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.703179 4706 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:32Z","lastTransitionTime":"2025-12-06T05:20:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.712903 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.731218 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.752356 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.769931 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.791815 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.806802 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.807417 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.807542 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.807674 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.807776 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:32Z","lastTransitionTime":"2025-12-06T05:20:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.808831 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.832713 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fc0492facbe5fcaf26a38cbf0d8fc33478e728f67001f9f121eed30a8807396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\
\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is 
after 2025-08-24T17:21:41Z" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.849607 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.869629 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.892331 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z"
Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.910914 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z"
Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.912348 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.912399 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.912419 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.912447 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.912467 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:32Z","lastTransitionTime":"2025-12-06T05:20:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:32 crc kubenswrapper[4706]: I1206 05:20:32.926630 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":
\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:32Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.015695 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.015773 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.015789 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.015816 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.015834 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:33Z","lastTransitionTime":"2025-12-06T05:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.036215 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:33 crc kubenswrapper[4706]: E1206 05:20:33.036458 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.119776 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.119876 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.119897 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.119929 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.119958 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:33Z","lastTransitionTime":"2025-12-06T05:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.223448 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.223890 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.224011 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.224197 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.224328 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:33Z","lastTransitionTime":"2025-12-06T05:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.331979 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.332078 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.332105 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.332137 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.332159 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:33Z","lastTransitionTime":"2025-12-06T05:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.435008 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.435089 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.435104 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.435124 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.435137 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:33Z","lastTransitionTime":"2025-12-06T05:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.538087 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.538131 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.538143 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.538171 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.538218 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:33Z","lastTransitionTime":"2025-12-06T05:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.640559 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.640614 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.640623 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.640640 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.640650 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:33Z","lastTransitionTime":"2025-12-06T05:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.743666 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.743721 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.743735 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.743755 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.743768 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:33Z","lastTransitionTime":"2025-12-06T05:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.847781 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.847826 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.847840 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.847860 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.847875 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:33Z","lastTransitionTime":"2025-12-06T05:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.950584 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.950641 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.950654 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.950677 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:33 crc kubenswrapper[4706]: I1206 05:20:33.950691 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:33Z","lastTransitionTime":"2025-12-06T05:20:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.035684 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.035750 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.035788 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:34 crc kubenswrapper[4706]: E1206 05:20:34.035863 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:34 crc kubenswrapper[4706]: E1206 05:20:34.036189 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:34 crc kubenswrapper[4706]: E1206 05:20:34.036039 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.053838 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.053893 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.053908 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.053927 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.053946 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:34Z","lastTransitionTime":"2025-12-06T05:20:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.157732 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.157787 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.157801 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.157823 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.157839 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:34Z","lastTransitionTime":"2025-12-06T05:20:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.260610 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.261041 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.261079 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.261195 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.261210 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:34Z","lastTransitionTime":"2025-12-06T05:20:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.364005 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.364103 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.364112 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.364131 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.364142 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:34Z","lastTransitionTime":"2025-12-06T05:20:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.467818 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.467868 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.467888 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.467908 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.467920 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:34Z","lastTransitionTime":"2025-12-06T05:20:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.483381 4706 generic.go:334] "Generic (PLEG): container finished" podID="87424bac-c58b-4fae-8f44-443e202bf113" containerID="c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362" exitCode=0 Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.483441 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" event={"ID":"87424bac-c58b-4fae-8f44-443e202bf113","Type":"ContainerDied","Data":"c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362"} Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.485281 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" event={"ID":"95f64f73-47d5-4156-8eb5-539fa23b4202","Type":"ContainerStarted","Data":"ac6c557fb022a4bf70551a123d98e7250ed6b425ea31531a32bcccd8bc16a173"} Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.497537 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources
\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:34Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.511311 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:34Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.521986 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:34Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.532798 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:34Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.543005 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:34Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.556866 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:34Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.569028 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:34Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.570845 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.570891 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.570904 4706 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.570924 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.570936 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:34Z","lastTransitionTime":"2025-12-06T05:20:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.580538 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:34Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.592615 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:34Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.604649 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:34Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.618211 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:34Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.629211 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:34Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.642275 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:34Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.655652 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0127b51-745e-4842-8bc1-cf7d089d2e0d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:34Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.670037 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:34Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.672899 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.673039 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.673077 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.673098 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.673111 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:34Z","lastTransitionTime":"2025-12-06T05:20:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.689493 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:34Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.715832 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fc0492facbe5fcaf26a38cbf0d8fc33478e728f67001f9f121eed30a8807396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\
\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:34Z is 
after 2025-08-24T17:21:41Z" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.776593 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.776682 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.776706 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.776741 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.776767 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:34Z","lastTransitionTime":"2025-12-06T05:20:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.879154 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.879193 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.879203 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.879222 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.879232 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:34Z","lastTransitionTime":"2025-12-06T05:20:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.981963 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.982011 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.982023 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.982041 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:34 crc kubenswrapper[4706]: I1206 05:20:34.982078 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:34Z","lastTransitionTime":"2025-12-06T05:20:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.035648 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:35 crc kubenswrapper[4706]: E1206 05:20:35.035806 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.086007 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.086067 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.086078 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.086096 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.086110 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:35Z","lastTransitionTime":"2025-12-06T05:20:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.189180 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.189220 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.189230 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.189249 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.189261 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:35Z","lastTransitionTime":"2025-12-06T05:20:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.291741 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.291781 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.291792 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.291810 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.291820 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:35Z","lastTransitionTime":"2025-12-06T05:20:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.394928 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.394977 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.394989 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.395007 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.395016 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:35Z","lastTransitionTime":"2025-12-06T05:20:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.497225 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.497271 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.497283 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.497302 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.497313 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:35Z","lastTransitionTime":"2025-12-06T05:20:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.502256 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:35Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.516328 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:35Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.532464 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:35Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.546112 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:35Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.559018 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:35Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.573911 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:35Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.587862 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4
d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:35Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.598232 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:35Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.599218 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.599260 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.599270 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.599287 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.599297 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:35Z","lastTransitionTime":"2025-12-06T05:20:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.610690 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:35Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.623289 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:35Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.634458 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:35Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.645816 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:35Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.656827 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://418fbe98ebc093b9ab0d935b32f34d145705cbcde20714629877fcca90111b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac6c557fb022a4bf70551a123d98e7250ed6b425ea31531a32bcccd8bc16a173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\
\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:35Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.667683 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0127b51-745e-4842-8bc1-cf7d089d2e0d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"
started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:35Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.680522 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with 
unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:35Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.692538 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":
\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:35Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.701343 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.701385 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.701407 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.701424 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.701436 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:35Z","lastTransitionTime":"2025-12-06T05:20:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.709882 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fc0492facbe5fcaf26a38cbf0d8fc33478e728f67001f9f121eed30a8807396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\
"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:35Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.805575 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.805617 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.805626 
4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.805642 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.805654 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:35Z","lastTransitionTime":"2025-12-06T05:20:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.907649 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.907692 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.907702 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.907719 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:35 crc kubenswrapper[4706]: I1206 05:20:35.907730 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:35Z","lastTransitionTime":"2025-12-06T05:20:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.011068 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.011121 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.011133 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.011155 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.011200 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:36Z","lastTransitionTime":"2025-12-06T05:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.035603 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:36 crc kubenswrapper[4706]: E1206 05:20:36.035817 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.036371 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.036453 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:36 crc kubenswrapper[4706]: E1206 05:20:36.036610 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:36 crc kubenswrapper[4706]: E1206 05:20:36.036805 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.114403 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.114447 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.114459 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.114476 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.114487 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:36Z","lastTransitionTime":"2025-12-06T05:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.217666 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.217722 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.217733 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.217755 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.217768 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:36Z","lastTransitionTime":"2025-12-06T05:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.320499 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.320554 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.320566 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.320589 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.320604 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:36Z","lastTransitionTime":"2025-12-06T05:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.423438 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.423490 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.423498 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.423517 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.423529 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:36Z","lastTransitionTime":"2025-12-06T05:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.496351 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" event={"ID":"87424bac-c58b-4fae-8f44-443e202bf113","Type":"ContainerStarted","Data":"9e33291cd993ce9bf4f530c26ef9173efc5ebe7c14f4d97adf1ca6faecca830a"} Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.497750 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovnkube-controller/0.log" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.501234 4706 generic.go:334] "Generic (PLEG): container finished" podID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerID="5fc0492facbe5fcaf26a38cbf0d8fc33478e728f67001f9f121eed30a8807396" exitCode=1 Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.501276 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerDied","Data":"5fc0492facbe5fcaf26a38cbf0d8fc33478e728f67001f9f121eed30a8807396"} Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.502256 4706 scope.go:117] "RemoveContainer" containerID="5fc0492facbe5fcaf26a38cbf0d8fc33478e728f67001f9f121eed30a8807396" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.514218 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-rel
ease-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.529143 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.529199 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.529213 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.529236 4706 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.529253 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:36Z","lastTransitionTime":"2025-12-06T05:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.533028 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.547309 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.564105 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.576940 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.587547 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.609944 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.626242 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.632181 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.632270 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.632283 4706 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.632337 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.632354 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:36Z","lastTransitionTime":"2025-12-06T05:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.641303 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.654370 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.671834 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.688166 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e33291cd993ce9bf4f530c26ef9173efc5ebe7c14f4d97adf1ca6faecca830a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.705598 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://418fbe98ebc093b9ab0d935b32f34d145705cbcde20714629877fcca90111b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac6c557fb022a4bf70551a123d98e7250ed6b425ea31531a32bcccd8bc16a173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 
05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.719363 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0127b51-745e-4842-8bc1-cf7d089d2e0d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.736201 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.736910 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.736962 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.736975 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.736996 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.737012 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:36Z","lastTransitionTime":"2025-12-06T05:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.752997 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.775283 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fc0492facbe5fcaf26a38cbf0d8fc33478e728f67001f9f121eed30a8807396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\
\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is 
after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.790114 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.810164 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.832340 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1
bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5fc0492facbe5fcaf26a38cbf0d8fc33478e728f67001f9f121eed30a8807396\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fc0492facbe5fcaf26a38cbf0d8fc33478e728f67001f9f121eed30a8807396\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"message\\\":\\\"2182 5964 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1206 05:20:34.872281 5964 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1206 05:20:34.872383 5964 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1206 05:20:34.872408 5964 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1206 05:20:34.872414 5964 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1206 05:20:34.872431 5964 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1206 05:20:34.872436 5964 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1206 05:20:34.872456 5964 handler.go:208] Removed *v1.Node event handler 7\\\\nI1206 05:20:34.872464 5964 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1206 05:20:34.872470 5964 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1206 05:20:34.872479 5964 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1206 05:20:34.872485 5964 handler.go:208] Removed *v1.Node event handler 2\\\\nI1206 05:20:34.872776 5964 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1206 05:20:34.872811 5964 factory.go:656] Stopping watch factory\\\\nI1206 05:20:34.872823 5964 handler.go:208] Removed *v1.EgressIP 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.839635 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.839701 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.839719 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.839744 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.839757 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:36Z","lastTransitionTime":"2025-12-06T05:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.849166 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0127b51-745e-4842-8bc1-cf7d089d2e0d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.865800 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.883162 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.896305 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.911303 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.928999 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.943035 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.943392 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 
05:20:36.943514 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.943630 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.943729 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:36Z","lastTransitionTime":"2025-12-06T05:20:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.945330 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\
"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods 
\\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.961524 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.977684 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:36 crc kubenswrapper[4706]: I1206 05:20:36.993981 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:36Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.008918 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.025639 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e33291cd993ce9bf4f530c26ef9173efc5ebe7c14f4d97adf1ca6faecca830a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.035746 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:37 crc kubenswrapper[4706]: E1206 05:20:37.035909 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.038480 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api
-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.046143 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.046231 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.046266 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.046292 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.046496 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:37Z","lastTransitionTime":"2025-12-06T05:20:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.052935 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://418fbe98ebc093b9ab0d935b32f34d145705cbcde20714629877fcca90111b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac6c557fb022a4bf70551a123d98e7250ed6b425ea31531a32bcccd8bc16a173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.149462 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.149554 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.149578 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.149612 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.149636 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:37Z","lastTransitionTime":"2025-12-06T05:20:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.199731 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs\") pod \"network-metrics-daemon-4ltjs\" (UID: \"f4065785-c72e-4c45-ab51-ce292be4f2ed\") " pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:37 crc kubenswrapper[4706]: E1206 05:20:37.199932 4706 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 06 05:20:37 crc kubenswrapper[4706]: E1206 05:20:37.200018 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs podName:f4065785-c72e-4c45-ab51-ce292be4f2ed nodeName:}" failed. No retries permitted until 2025-12-06 05:20:53.199994369 +0000 UTC m=+75.527818313 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs") pod "network-metrics-daemon-4ltjs" (UID: "f4065785-c72e-4c45-ab51-ce292be4f2ed") : object "openshift-multus"/"metrics-daemon-secret" not registered Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.253168 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.253235 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.253253 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.253287 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.253304 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:37Z","lastTransitionTime":"2025-12-06T05:20:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.356394 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.356435 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.356444 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.356461 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.356473 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:37Z","lastTransitionTime":"2025-12-06T05:20:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.459678 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.459726 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.459736 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.459752 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.459766 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:37Z","lastTransitionTime":"2025-12-06T05:20:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.507893 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovnkube-controller/0.log" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.511270 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerStarted","Data":"7d53a10e6334cf43d2d8739fc829e0d020eb49cefe9e56294d175303aa3e755d"} Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.512088 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.531202 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.544720 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.560286 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.562298 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.562342 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 
05:20:37.562353 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.562372 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.562384 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:37Z","lastTransitionTime":"2025-12-06T05:20:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.573501 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5
7257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.589514 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.608782 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.623823 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.636374 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.652866 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e33291cd993ce9bf4f530c26ef9173efc5ebe7c14f4d97adf1ca6faecca830a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.664554 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.664590 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.664598 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.664617 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.664627 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:37Z","lastTransitionTime":"2025-12-06T05:20:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.666742 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.682259 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.694945 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.709546 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://418fbe98ebc093b9ab0d935b32f34d145705cbcde20714629877fcca90111b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac6c557fb022a4bf70551a123d98e7250ed6b425ea31531a32bcccd8bc16a173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 
05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.729080 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528
ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d53a10e6334cf43d2d8739fc829e0d020eb49cefe9e56294d175303aa3e755d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fc0492facbe5fcaf26a38cbf0d8fc33478e728f67001f9f121eed30a8807396\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"message\\\":\\\"2182 5964 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1206 05:20:34.872281 5964 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1206 05:20:34.872383 5964 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1206 05:20:34.872408 5964 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1206 05:20:34.872414 5964 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1206 05:20:34.872431 5964 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1206 05:20:34.872436 5964 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1206 05:20:34.872456 5964 handler.go:208] Removed *v1.Node event handler 7\\\\nI1206 05:20:34.872464 5964 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1206 05:20:34.872470 5964 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1206 05:20:34.872479 5964 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1206 05:20:34.872485 5964 handler.go:208] Removed *v1.Node event handler 2\\\\nI1206 05:20:34.872776 5964 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1206 05:20:34.872811 5964 factory.go:656] Stopping watch factory\\\\nI1206 05:20:34.872823 5964 handler.go:208] Removed *v1.EgressIP 
ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.740618 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0127b51-745e-4842-8bc1-cf7d089d2e0d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.755417 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.766760 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.766848 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.766858 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.766873 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.766884 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:37Z","lastTransitionTime":"2025-12-06T05:20:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.772917 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:37Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.870528 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.870609 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.870626 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.870652 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.870676 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:37Z","lastTransitionTime":"2025-12-06T05:20:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.906227 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.906331 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:37 crc kubenswrapper[4706]: E1206 05:20:37.906442 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:21:09.906406101 +0000 UTC m=+92.234230045 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:20:37 crc kubenswrapper[4706]: E1206 05:20:37.906456 4706 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 06 05:20:37 crc kubenswrapper[4706]: E1206 05:20:37.906537 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-06 05:21:09.906512914 +0000 UTC m=+92.234336898 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.906616 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:37 crc kubenswrapper[4706]: E1206 05:20:37.906748 4706 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 06 05:20:37 crc kubenswrapper[4706]: E1206 05:20:37.906796 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-06 05:21:09.906783441 +0000 UTC m=+92.234607415 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.973855 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.973908 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.973929 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.973956 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:37 crc kubenswrapper[4706]: I1206 05:20:37.973972 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:37Z","lastTransitionTime":"2025-12-06T05:20:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.007573 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.007660 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:38 crc kubenswrapper[4706]: E1206 05:20:38.007902 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 06 05:20:38 crc kubenswrapper[4706]: E1206 05:20:38.007967 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 06 05:20:38 crc kubenswrapper[4706]: E1206 05:20:38.007973 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 06 05:20:38 crc kubenswrapper[4706]: E1206 05:20:38.007993 4706 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:38 crc kubenswrapper[4706]: E1206 
05:20:38.008006 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 06 05:20:38 crc kubenswrapper[4706]: E1206 05:20:38.008029 4706 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:38 crc kubenswrapper[4706]: E1206 05:20:38.008119 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-06 05:21:10.008086303 +0000 UTC m=+92.335910287 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:38 crc kubenswrapper[4706]: E1206 05:20:38.008160 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-06 05:21:10.008142115 +0000 UTC m=+92.335966089 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.035112 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.035180 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.035142 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:38 crc kubenswrapper[4706]: E1206 05:20:38.035314 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:38 crc kubenswrapper[4706]: E1206 05:20:38.035385 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:38 crc kubenswrapper[4706]: E1206 05:20:38.035539 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.052118 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\
\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:38Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.071843 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:38Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.076919 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.076949 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.076959 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.077004 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.077014 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:38Z","lastTransitionTime":"2025-12-06T05:20:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.088240 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:38Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.105924 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:38Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.117144 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:38Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.133598 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:38Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.152157 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:38Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.166465 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:38Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.182226 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.182273 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.182285 4706 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.182303 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.182316 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:38Z","lastTransitionTime":"2025-12-06T05:20:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.185468 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:38Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.199896 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:38Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.216859 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:38Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.232285 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e33291cd993ce9bf4f530c26ef9173efc5ebe7c14f4d97adf1ca6faecca830a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:38Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.245645 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://418fbe98ebc093b9ab0d935b32f34d145705cbcde20714629877fcca90111b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac6c557fb022a4bf70551a123d98e7250ed6b425ea31531a32bcccd8bc16a173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:38Z is after 2025-08-24T17:21:41Z" Dec 06 
05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.259681 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0127b51-745e-4842-8bc1-cf7d089d2e0d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:38Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.274779 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:38Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.285212 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.285533 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.285605 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.285683 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.285787 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:38Z","lastTransitionTime":"2025-12-06T05:20:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.290450 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:38Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.317075 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d53a10e6334cf43d2d8739fc829e0d020eb49cefe9e56294d175303aa3e755d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fc0492facbe5fcaf26a38cbf0d8fc33478e728f67001f9f121eed30a8807396\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"message\\\":\\\"2182 5964 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1206 05:20:34.872281 5964 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1206 05:20:34.872383 5964 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1206 05:20:34.872408 5964 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1206 05:20:34.872414 5964 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1206 05:20:34.872431 5964 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1206 05:20:34.872436 5964 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1206 05:20:34.872456 5964 handler.go:208] Removed *v1.Node event handler 7\\\\nI1206 05:20:34.872464 5964 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1206 05:20:34.872470 5964 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1206 05:20:34.872479 5964 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1206 05:20:34.872485 5964 handler.go:208] Removed *v1.Node event handler 2\\\\nI1206 05:20:34.872776 5964 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1206 05:20:34.872811 5964 factory.go:656] Stopping 
watch factory\\\\nI1206 05:20:34.872823 5964 handler.go:208] Removed *v1.EgressIP ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs
\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:38Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.390355 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.390428 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.390459 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.390484 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.390502 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:38Z","lastTransitionTime":"2025-12-06T05:20:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.493803 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.493871 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.493889 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.493919 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.493938 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:38Z","lastTransitionTime":"2025-12-06T05:20:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.600965 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.601015 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.601027 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.601068 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.601288 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:38Z","lastTransitionTime":"2025-12-06T05:20:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.705117 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.705191 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.705209 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.705236 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.705255 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:38Z","lastTransitionTime":"2025-12-06T05:20:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.807979 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.808140 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.808166 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.808686 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.808937 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:38Z","lastTransitionTime":"2025-12-06T05:20:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.913118 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.913166 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.913178 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.913198 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:38 crc kubenswrapper[4706]: I1206 05:20:38.913212 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:38Z","lastTransitionTime":"2025-12-06T05:20:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.016353 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.016451 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.016481 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.016518 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.016540 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:39Z","lastTransitionTime":"2025-12-06T05:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.035972 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs"
Dec 06 05:20:39 crc kubenswrapper[4706]: E1206 05:20:39.036184 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.121635 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.121704 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.121720 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.121742 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.121757 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:39Z","lastTransitionTime":"2025-12-06T05:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.224246 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.224292 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.224302 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.224318 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.224330 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:39Z","lastTransitionTime":"2025-12-06T05:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.327232 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.327316 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.327348 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.327379 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.327402 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:39Z","lastTransitionTime":"2025-12-06T05:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.430752 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.430824 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.430849 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.430881 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.430905 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:39Z","lastTransitionTime":"2025-12-06T05:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.522697 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovnkube-controller/1.log"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.523487 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovnkube-controller/0.log"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.528132 4706 generic.go:334] "Generic (PLEG): container finished" podID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerID="7d53a10e6334cf43d2d8739fc829e0d020eb49cefe9e56294d175303aa3e755d" exitCode=1
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.528195 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerDied","Data":"7d53a10e6334cf43d2d8739fc829e0d020eb49cefe9e56294d175303aa3e755d"}
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.528253 4706 scope.go:117] "RemoveContainer" containerID="5fc0492facbe5fcaf26a38cbf0d8fc33478e728f67001f9f121eed30a8807396"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.529593 4706 scope.go:117] "RemoveContainer" containerID="7d53a10e6334cf43d2d8739fc829e0d020eb49cefe9e56294d175303aa3e755d"
Dec 06 05:20:39 crc kubenswrapper[4706]: E1206 05:20:39.529969 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-l5xg7_openshift-ovn-kubernetes(a4bbd5a9-5b78-4e07-b4af-e10d4768de95)\"" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.535645 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.535702 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.535714 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.535731 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.535740 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:39Z","lastTransitionTime":"2025-12-06T05:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.553815 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:39Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.567360 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:39Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.585075 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:39Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.604138 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:39Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.618279 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:39Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.634303 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:39Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.638995 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.639065 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.639081 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.639103 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.639118 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:39Z","lastTransitionTime":"2025-12-06T05:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.651940 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:39Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.670351 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:39Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.686240 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:39Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.709139 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e33291cd993ce9bf4f530c26ef9173efc5ebe7c14f4d97adf1ca6faecca830a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:39Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.721665 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-12-06T05:20:39Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.736757 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\
\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:39Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.741400 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.741455 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.741464 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.741482 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.741492 4706 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:39Z","lastTransitionTime":"2025-12-06T05:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.751741 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://418fbe98ebc093b9ab0d935b32f34d145705cbcde20714629877fcca90111b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac6c557fb022a4bf70551a123d98e7250ed6b425ea31531a32bcccd8bc16a173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"i
p\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:39Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.767438 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\
\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:39Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.786826 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d53a10e6334cf43d2d8739fc829e0d020eb49ce
fe9e56294d175303aa3e755d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fc0492facbe5fcaf26a38cbf0d8fc33478e728f67001f9f121eed30a8807396\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"message\\\":\\\"2182 5964 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1206 05:20:34.872281 5964 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1206 05:20:34.872383 5964 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1206 05:20:34.872408 5964 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1206 05:20:34.872414 5964 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1206 05:20:34.872431 5964 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1206 05:20:34.872436 5964 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1206 05:20:34.872456 5964 handler.go:208] Removed *v1.Node event handler 7\\\\nI1206 05:20:34.872464 5964 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1206 05:20:34.872470 5964 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1206 05:20:34.872479 5964 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1206 05:20:34.872485 5964 handler.go:208] Removed *v1.Node event handler 2\\\\nI1206 05:20:34.872776 5964 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1206 05:20:34.872811 5964 factory.go:656] Stopping watch factory\\\\nI1206 05:20:34.872823 5964 handler.go:208] Removed *v1.EgressIP ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d53a10e6334cf43d2d8739fc829e0d020eb49cefe9e56294d175303aa3e755d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:38Z\\\",\\\"message\\\":\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1206 05:20:37.765932 6203 ovnkube.go:599] Stopped ovnkube\\\\nI1206 05:20:37.766109 6203 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1206 05:20:37.766381 6203 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1206 05:20:37.766383 6203 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1206 05:20:37.766443 6203 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nF1206 05:20:37.766451 6203 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 
0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\
\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:39Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.800523 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0127b51-745e-4842-8bc1-cf7d089d2e0d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:39Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.816788 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:39Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.844672 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.844710 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.844720 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.844739 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.844751 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:39Z","lastTransitionTime":"2025-12-06T05:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.948244 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.948317 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.948336 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.948363 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:39 crc kubenswrapper[4706]: I1206 05:20:39.948381 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:39Z","lastTransitionTime":"2025-12-06T05:20:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.037074 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.037217 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.036983 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:40 crc kubenswrapper[4706]: E1206 05:20:40.037667 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:40 crc kubenswrapper[4706]: E1206 05:20:40.037949 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:40 crc kubenswrapper[4706]: E1206 05:20:40.038264 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.053340 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.053388 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.053406 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.053530 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.053592 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:40Z","lastTransitionTime":"2025-12-06T05:20:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.159740 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.159808 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.159827 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.159852 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.159867 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:40Z","lastTransitionTime":"2025-12-06T05:20:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.262357 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.262399 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.262410 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.262426 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.262437 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:40Z","lastTransitionTime":"2025-12-06T05:20:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.365197 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.365234 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.365245 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.365262 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.365274 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:40Z","lastTransitionTime":"2025-12-06T05:20:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.468629 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.468690 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.468702 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.468722 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.468738 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:40Z","lastTransitionTime":"2025-12-06T05:20:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.533331 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovnkube-controller/1.log" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.572699 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.572759 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.572778 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.572798 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.572809 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:40Z","lastTransitionTime":"2025-12-06T05:20:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.675719 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.675758 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.675769 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.675786 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.675797 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:40Z","lastTransitionTime":"2025-12-06T05:20:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.777911 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.777952 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.777960 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.777975 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.777984 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:40Z","lastTransitionTime":"2025-12-06T05:20:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.880583 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.880633 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.880641 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.880656 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.880666 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:40Z","lastTransitionTime":"2025-12-06T05:20:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.983992 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.984135 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.984156 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.984180 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:40 crc kubenswrapper[4706]: I1206 05:20:40.984193 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:40Z","lastTransitionTime":"2025-12-06T05:20:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.035640 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:41 crc kubenswrapper[4706]: E1206 05:20:41.035815 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.086581 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.086632 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.086644 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.086666 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.086685 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:41Z","lastTransitionTime":"2025-12-06T05:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.188730 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.188772 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.188781 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.188797 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.188809 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:41Z","lastTransitionTime":"2025-12-06T05:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.291641 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.291667 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.291675 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.291690 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.291698 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:41Z","lastTransitionTime":"2025-12-06T05:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.394882 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.394933 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.394943 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.394961 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.394971 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:41Z","lastTransitionTime":"2025-12-06T05:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.497923 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.497962 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.497971 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.497988 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.498000 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:41Z","lastTransitionTime":"2025-12-06T05:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.601069 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.601116 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.601128 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.601142 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.601152 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:41Z","lastTransitionTime":"2025-12-06T05:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.703275 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.703314 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.703325 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.703344 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.703356 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:41Z","lastTransitionTime":"2025-12-06T05:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.806709 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.806759 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.806768 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.806784 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.806793 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:41Z","lastTransitionTime":"2025-12-06T05:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.909779 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.909825 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.909837 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.909853 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:41 crc kubenswrapper[4706]: I1206 05:20:41.909864 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:41Z","lastTransitionTime":"2025-12-06T05:20:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.012388 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.012438 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.012455 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.012503 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.012516 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:42Z","lastTransitionTime":"2025-12-06T05:20:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.035878 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.035923 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:42 crc kubenswrapper[4706]: E1206 05:20:42.036129 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.036232 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:42 crc kubenswrapper[4706]: E1206 05:20:42.036355 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:42 crc kubenswrapper[4706]: E1206 05:20:42.036439 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.116790 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.116862 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.116886 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.116917 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.116946 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:42Z","lastTransitionTime":"2025-12-06T05:20:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.220431 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.220474 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.220486 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.220504 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.220516 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:42Z","lastTransitionTime":"2025-12-06T05:20:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.323312 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.323357 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.323367 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.323385 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.323397 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:42Z","lastTransitionTime":"2025-12-06T05:20:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.429432 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.429478 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.429490 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.429510 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.429522 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:42Z","lastTransitionTime":"2025-12-06T05:20:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.468366 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.468405 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.468414 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.468429 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.468439 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:42Z","lastTransitionTime":"2025-12-06T05:20:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:42 crc kubenswrapper[4706]: E1206 05:20:42.482617 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:42Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.486828 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.486879 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.486895 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.486914 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.486926 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:42Z","lastTransitionTime":"2025-12-06T05:20:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:42 crc kubenswrapper[4706]: E1206 05:20:42.500902 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:42Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.504970 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.505159 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.505241 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.505334 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.505402 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:42Z","lastTransitionTime":"2025-12-06T05:20:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:42 crc kubenswrapper[4706]: E1206 05:20:42.517989 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:42Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.521772 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.521809 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.521823 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.521839 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.521849 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:42Z","lastTransitionTime":"2025-12-06T05:20:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:42 crc kubenswrapper[4706]: E1206 05:20:42.536898 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:42Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.542258 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.542313 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.542328 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.542349 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.542362 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:42Z","lastTransitionTime":"2025-12-06T05:20:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:42 crc kubenswrapper[4706]: E1206 05:20:42.563728 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:42Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:42 crc kubenswrapper[4706]: E1206 05:20:42.563855 4706 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.565386 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
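
Every retry in this burst fails identically: the kubelet's node-status PATCH is intercepted by the node.network-node-identity.openshift.io validating webhook, whose serving certificate at https://127.0.0.1:9743 expired on 2025-08-24T17:21:41Z while the node clock reads 2025-12-06T05:20:42Z. Below is a minimal Go sketch of the validity check that is failing, assuming it runs on the affected node; the address comes from the error message above, and the probe itself is illustrative, not kubelet or OpenShift code.

package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// Skip chain verification so the handshake succeeds even though the
	// certificate is expired; we inspect the validity window ourselves.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("handshake failed: %v", err)
	}
	defer conn.Close()

	now := time.Now()
	for _, cert := range conn.ConnectionState().PeerCertificates {
		// The same comparison that yields "x509: certificate has expired
		// or is not yet valid" during normal verification.
		switch {
		case now.Before(cert.NotBefore):
			fmt.Printf("%s: not yet valid, NotBefore=%s\n",
				cert.Subject, cert.NotBefore.Format(time.RFC3339))
		case now.After(cert.NotAfter):
			fmt.Printf("%s: expired, NotAfter=%s, current time %s\n",
				cert.Subject, cert.NotAfter.Format(time.RFC3339), now.Format(time.RFC3339))
		default:
			fmt.Printf("%s: valid until %s\n",
				cert.Subject, cert.NotAfter.Format(time.RFC3339))
		}
	}
}

On this node the probe would report the leaf certificate as expired, matching the webhook error; note the kubelet gives up after its fixed retry budget ("update node status exceeds retry count" above) and starts the cycle again on the next sync.
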
event="NodeHasSufficientMemory" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.565400 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.565408 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.565421 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.565430 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:42Z","lastTransitionTime":"2025-12-06T05:20:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.667920 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.667967 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.667980 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.667998 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.668010 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:42Z","lastTransitionTime":"2025-12-06T05:20:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.770198 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.770243 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.770255 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.770274 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.770284 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:42Z","lastTransitionTime":"2025-12-06T05:20:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.872739 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.872782 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.872793 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.872813 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.872825 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:42Z","lastTransitionTime":"2025-12-06T05:20:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.975671 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.975713 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.975722 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.975741 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:42 crc kubenswrapper[4706]: I1206 05:20:42.975754 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:42Z","lastTransitionTime":"2025-12-06T05:20:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.035727 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:43 crc kubenswrapper[4706]: E1206 05:20:43.035888 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.078609 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.078652 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.078665 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.078683 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.078695 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:43Z","lastTransitionTime":"2025-12-06T05:20:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.185460 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.185668 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.185697 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.185724 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.185734 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:43Z","lastTransitionTime":"2025-12-06T05:20:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.288029 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.288377 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.288468 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.288541 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.288615 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:43Z","lastTransitionTime":"2025-12-06T05:20:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.391221 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.391258 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.391268 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.391284 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.391320 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:43Z","lastTransitionTime":"2025-12-06T05:20:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.494240 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.494272 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.494284 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.494297 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.494306 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:43Z","lastTransitionTime":"2025-12-06T05:20:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.596133 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.596164 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.596172 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.596186 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.596195 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:43Z","lastTransitionTime":"2025-12-06T05:20:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.698098 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.698139 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.698149 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.698167 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.698178 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:43Z","lastTransitionTime":"2025-12-06T05:20:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.801307 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.801358 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.801370 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.801388 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.801402 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:43Z","lastTransitionTime":"2025-12-06T05:20:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.904770 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.904821 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.904834 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.904855 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:43 crc kubenswrapper[4706]: I1206 05:20:43.904868 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:43Z","lastTransitionTime":"2025-12-06T05:20:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.007865 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.007910 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.007923 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.007941 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.007953 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:44Z","lastTransitionTime":"2025-12-06T05:20:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.035942 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.035948 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.035979 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:44 crc kubenswrapper[4706]: E1206 05:20:44.036181 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:44 crc kubenswrapper[4706]: E1206 05:20:44.036282 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:44 crc kubenswrapper[4706]: E1206 05:20:44.036335 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.110345 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.110401 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.110421 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.110439 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.110449 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:44Z","lastTransitionTime":"2025-12-06T05:20:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.212725 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.212782 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.212795 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.212815 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.212830 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:44Z","lastTransitionTime":"2025-12-06T05:20:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.315946 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.315989 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.316001 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.316018 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.316029 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:44Z","lastTransitionTime":"2025-12-06T05:20:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.418386 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.418454 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.418463 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.418481 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.418491 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:44Z","lastTransitionTime":"2025-12-06T05:20:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.522721 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.522761 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.522772 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.522790 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.522802 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:44Z","lastTransitionTime":"2025-12-06T05:20:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.625118 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.625177 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.625195 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.625217 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.625234 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:44Z","lastTransitionTime":"2025-12-06T05:20:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.728422 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.728527 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.728558 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.728596 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.728620 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:44Z","lastTransitionTime":"2025-12-06T05:20:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.831949 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.832016 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.832033 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.832086 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.832103 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:44Z","lastTransitionTime":"2025-12-06T05:20:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.934161 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.934199 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.934210 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.934224 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:44 crc kubenswrapper[4706]: I1206 05:20:44.934233 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:44Z","lastTransitionTime":"2025-12-06T05:20:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.035126 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:45 crc kubenswrapper[4706]: E1206 05:20:45.035268 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.036350 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.036401 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.036409 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.036427 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.036438 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:45Z","lastTransitionTime":"2025-12-06T05:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
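Every pod-sync failure and NodeNotReady transition above reduces to the same readiness probe: the kubelet keeps NetworkReady=false until a CNI configuration file appears under /etc/kubernetes/cni/net.d/. A minimal, self-contained sketch of that check follows (the directory comes straight from the log message; the extension list follows common CNI conventions and is an assumption here, not the kubelet's actual code):

// cnicheck.go - sketch of the readiness condition these log lines keep
// reporting: NetworkReady stays false until a CNI config file shows up.
// Directory taken from the log text; extensions are a CNI-convention
// assumption, not the kubelet's implementation.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // path quoted in the log message
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Printf("NetworkReady=false: cannot read %s: %v\n", confDir, err)
		return
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // extensions CNI loaders conventionally accept
			fmt.Printf("NetworkReady=true: found CNI config %s\n", e.Name())
			return
		}
	}
	fmt.Printf("NetworkReady=false: no CNI configuration file in %s\n", confDir)
}

Run on the node itself, this prints the same "no CNI configuration file" verdict until the network provider writes its config, which is the moment the kubelet flips back to Ready.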
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.139724 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.139774 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.139787 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.139807 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.139821 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:45Z","lastTransitionTime":"2025-12-06T05:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.242389 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.242425 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.242433 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.242449 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.242460 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:45Z","lastTransitionTime":"2025-12-06T05:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.344992 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.345088 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.345107 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.345134 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.345151 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:45Z","lastTransitionTime":"2025-12-06T05:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.448665 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.448910 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.448929 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.448954 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.448971 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:45Z","lastTransitionTime":"2025-12-06T05:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.551112 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.551164 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.551178 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.551195 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.551207 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:45Z","lastTransitionTime":"2025-12-06T05:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.658171 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.658287 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.658332 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.658488 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.658583 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:45Z","lastTransitionTime":"2025-12-06T05:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.761643 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.761703 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.761720 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.761748 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.761768 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:45Z","lastTransitionTime":"2025-12-06T05:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.868431 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.868483 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.868494 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.868512 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.868523 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:45Z","lastTransitionTime":"2025-12-06T05:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.972179 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.972230 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.972243 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.972262 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:45 crc kubenswrapper[4706]: I1206 05:20:45.972275 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:45Z","lastTransitionTime":"2025-12-06T05:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.035654 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.035723 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 06 05:20:46 crc kubenswrapper[4706]: E1206 05:20:46.035812 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 06 05:20:46 crc kubenswrapper[4706]: E1206 05:20:46.035874 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.035943 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 06 05:20:46 crc kubenswrapper[4706]: E1206 05:20:46.036211 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.076776 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.076814 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.076828 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.076846 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.076858 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:46Z","lastTransitionTime":"2025-12-06T05:20:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.179599 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.179637 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.179645 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.179662 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.179674 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:46Z","lastTransitionTime":"2025-12-06T05:20:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.282423 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.282459 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.282468 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.282483 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.282493 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:46Z","lastTransitionTime":"2025-12-06T05:20:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.384958 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.385008 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.385017 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.385035 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.385071 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:46Z","lastTransitionTime":"2025-12-06T05:20:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.488121 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.488185 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.488204 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.488230 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.488247 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:46Z","lastTransitionTime":"2025-12-06T05:20:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.591715 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.591763 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.591772 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.591791 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.591802 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:46Z","lastTransitionTime":"2025-12-06T05:20:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.694749 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.694818 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.694828 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.694849 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.694859 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:46Z","lastTransitionTime":"2025-12-06T05:20:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.797375 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.797476 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.797490 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.797511 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.797530 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:46Z","lastTransitionTime":"2025-12-06T05:20:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.900712 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.900767 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.900776 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.900793 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:46 crc kubenswrapper[4706]: I1206 05:20:46.900803 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:46Z","lastTransitionTime":"2025-12-06T05:20:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.003828 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.003888 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.003906 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.003930 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.003949 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:47Z","lastTransitionTime":"2025-12-06T05:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
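Each setters.go:603 entry above embeds the node's Ready condition serialized as JSON, and the payload is identical from beat to beat except for the timestamps. For grepping or automation, a short sketch that decodes one of those condition objects copied from the lines above (the struct below is a local stand-in inferred from the printed keys, not an import of the Kubernetes API types):

// condparse.go - decode the condition={...} JSON that setters.go:603 prints.
// Field names come from the keys visible in the log; the struct is a local
// assumption mirroring the NodeCondition shape, not the real API type.
package main

import (
	"encoding/json"
	"fmt"
)

type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	// Payload copied verbatim from one of the heartbeat lines above.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:45Z","lastTransitionTime":"2025-12-06T05:20:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`
	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s (%s): %s\n", c.Type, c.Status, c.Reason, c.Message)
}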
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.035761 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs"
Dec 06 05:20:47 crc kubenswrapper[4706]: E1206 05:20:47.035933 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.106487 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.106539 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.106550 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.106570 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.106586 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:47Z","lastTransitionTime":"2025-12-06T05:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.208604 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.208658 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.208675 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.208694 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.208707 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:47Z","lastTransitionTime":"2025-12-06T05:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.311903 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.311990 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.312013 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.312081 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.312113 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:47Z","lastTransitionTime":"2025-12-06T05:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.414578 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.414625 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.414637 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.414655 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.414669 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:47Z","lastTransitionTime":"2025-12-06T05:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.516429 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.516495 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.516508 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.516527 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.516541 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:47Z","lastTransitionTime":"2025-12-06T05:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.618982 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.619039 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.619080 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.619121 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.619139 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:47Z","lastTransitionTime":"2025-12-06T05:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.722189 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.722239 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.722255 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.722273 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.722284 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:47Z","lastTransitionTime":"2025-12-06T05:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.825092 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.825130 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.825141 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.825159 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.825171 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:47Z","lastTransitionTime":"2025-12-06T05:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.927730 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.927785 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.927803 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.927827 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:47 crc kubenswrapper[4706]: I1206 05:20:47.927843 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:47Z","lastTransitionTime":"2025-12-06T05:20:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.034708 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.034758 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.034769 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.034786 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.034824 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:48Z","lastTransitionTime":"2025-12-06T05:20:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.035039 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.035192 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.035266 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 06 05:20:48 crc kubenswrapper[4706]: E1206 05:20:48.035224 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
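The util.go:30 lines show the kubelet repeatedly preparing new sandboxes that then fail for the same NetworkReady=false reason. That condition originates in the container runtime and can be queried over CRI directly, independent of the kubelet; a sketch under the assumption of a CRI-O socket at /var/run/crio/crio.sock and the k8s.io/cri-api v1 bindings (both are assumptions for illustration, not taken from the log):

// netready.go - sketch: ask the CRI runtime for the NetworkReady condition
// that the kubelet is relaying in these log lines. Socket path and module
// choice are assumptions, not values read from the log.
package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func main() {
	conn, err := grpc.Dial("unix:///var/run/crio/crio.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := runtimeapi.NewRuntimeServiceClient(conn).
		Status(ctx, &runtimeapi.StatusRequest{})
	if err != nil {
		panic(err)
	}
	for _, cond := range resp.GetStatus().GetConditions() {
		// NetworkReady=false here is what surfaces above as NetworkPluginNotReady.
		fmt.Printf("%s=%v reason=%q message=%q\n",
			cond.GetType(), cond.GetStatus(), cond.GetReason(), cond.GetMessage())
	}
}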
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:48 crc kubenswrapper[4706]: E1206 05:20:48.035403 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:48 crc kubenswrapper[4706]: E1206 05:20:48.035476 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.052560 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:48Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.066021 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountP
ath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:48Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.087114 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d53a10e6334cf43d2d8739fc829e0d020eb49ce
fe9e56294d175303aa3e755d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5fc0492facbe5fcaf26a38cbf0d8fc33478e728f67001f9f121eed30a8807396\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"message\\\":\\\"2182 5964 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1206 05:20:34.872281 5964 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1206 05:20:34.872383 5964 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1206 05:20:34.872408 5964 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1206 05:20:34.872414 5964 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1206 05:20:34.872431 5964 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1206 05:20:34.872436 5964 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1206 05:20:34.872456 5964 handler.go:208] Removed *v1.Node event handler 7\\\\nI1206 05:20:34.872464 5964 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1206 05:20:34.872470 5964 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1206 05:20:34.872479 5964 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1206 05:20:34.872485 5964 handler.go:208] Removed *v1.Node event handler 2\\\\nI1206 05:20:34.872776 5964 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1206 05:20:34.872811 5964 factory.go:656] Stopping watch factory\\\\nI1206 05:20:34.872823 5964 handler.go:208] Removed *v1.EgressIP ev\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d53a10e6334cf43d2d8739fc829e0d020eb49cefe9e56294d175303aa3e755d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:38Z\\\",\\\"message\\\":\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1206 05:20:37.765932 6203 ovnkube.go:599] Stopped ovnkube\\\\nI1206 05:20:37.766109 6203 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1206 05:20:37.766381 6203 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1206 05:20:37.766383 6203 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1206 05:20:37.766443 6203 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nF1206 05:20:37.766451 6203 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 
0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\
\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:48Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.097498 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0127b51-745e-4842-8bc1-cf7d089d2e0d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:48Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.110589 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:48Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.122838 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:48Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.134272 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:48Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.138988 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.139022 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.139035 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.139071 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.139084 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:48Z","lastTransitionTime":"2025-12-06T05:20:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.147086 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:48Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.158437 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:48Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.176922 4706 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278
ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:48Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.195574 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:48Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.210869 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:48Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.226393 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:48Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.239122 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:48Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.241089 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.241125 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.241134 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.241149 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.241160 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:48Z","lastTransitionTime":"2025-12-06T05:20:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.254418 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e33291cd993ce9bf4f530c26ef9173efc5ebe7c14f4d97adf1ca6faecca830a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:48Z is after 2025-08-24T17:21:41Z"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.267506 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:48Z is after 2025-08-24T17:21:41Z"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.281603 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://418fbe98ebc093b9ab0d935b32f34d145705cbcde20714629877fcca90111b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac6c557fb022a4bf70551a123d98e7250ed6b425ea31531a32bcccd8bc16a173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:48Z is after 2025-08-24T17:21:41Z"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.344245 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.344425 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.344500 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.344569 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.344625 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:48Z","lastTransitionTime":"2025-12-06T05:20:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.446928 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.446968 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.446977 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.446994 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.447004 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:48Z","lastTransitionTime":"2025-12-06T05:20:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.550424 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.550473 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.550485 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.550502 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.550514 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:48Z","lastTransitionTime":"2025-12-06T05:20:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.652846 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.652911 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.652925 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.652948 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.652960 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:48Z","lastTransitionTime":"2025-12-06T05:20:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.755161 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.755433 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.755516 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.755600 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.755682 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:48Z","lastTransitionTime":"2025-12-06T05:20:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.858950 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.859193 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.859257 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.859318 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.859412 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:48Z","lastTransitionTime":"2025-12-06T05:20:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.962491 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.962532 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.962575 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.962592 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:48 crc kubenswrapper[4706]: I1206 05:20:48.962602 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:48Z","lastTransitionTime":"2025-12-06T05:20:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.035918 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs"
Dec 06 05:20:49 crc kubenswrapper[4706]: E1206 05:20:49.036114 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.065408 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.065466 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.065475 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.065489 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.065499 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:49Z","lastTransitionTime":"2025-12-06T05:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.168065 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.168106 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.168120 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.168139 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.168151 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:49Z","lastTransitionTime":"2025-12-06T05:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.271001 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.271047 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.271075 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.271093 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.271105 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:49Z","lastTransitionTime":"2025-12-06T05:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.374306 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.374354 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.374363 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.374378 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.374387 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:49Z","lastTransitionTime":"2025-12-06T05:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.477231 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.477305 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.477327 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.477358 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.477381 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:49Z","lastTransitionTime":"2025-12-06T05:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.580035 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.580103 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.580115 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.580134 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.580146 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:49Z","lastTransitionTime":"2025-12-06T05:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.683683 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.683741 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.683752 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.683772 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.683783 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:49Z","lastTransitionTime":"2025-12-06T05:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.786560 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.786605 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.786619 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.786643 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.786657 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:49Z","lastTransitionTime":"2025-12-06T05:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.889650 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.889723 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.889746 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.889778 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.889803 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:49Z","lastTransitionTime":"2025-12-06T05:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.992161 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.992202 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.992212 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.992247 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:49 crc kubenswrapper[4706]: I1206 05:20:49.992259 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:49Z","lastTransitionTime":"2025-12-06T05:20:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.036203 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.036221 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 06 05:20:50 crc kubenswrapper[4706]: E1206 05:20:50.036365 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 06 05:20:50 crc kubenswrapper[4706]: E1206 05:20:50.036491 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.036247 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 06 05:20:50 crc kubenswrapper[4706]: E1206 05:20:50.036711 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.094978 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.095024 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.095037 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.095074 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.095088 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:50Z","lastTransitionTime":"2025-12-06T05:20:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.217785 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.218047 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.218091 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.218119 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.218134 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:50Z","lastTransitionTime":"2025-12-06T05:20:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.320649 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.320693 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.320703 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.320722 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.320732 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:50Z","lastTransitionTime":"2025-12-06T05:20:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.423615 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.423668 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.423683 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.423703 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.423720 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:50Z","lastTransitionTime":"2025-12-06T05:20:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.526707 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.526752 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.526761 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.526777 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.526787 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:50Z","lastTransitionTime":"2025-12-06T05:20:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.629554 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.629613 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.629631 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.629661 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.629682 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:50Z","lastTransitionTime":"2025-12-06T05:20:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.732744 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.732780 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.732793 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.732812 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.732823 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:50Z","lastTransitionTime":"2025-12-06T05:20:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.835665 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.835730 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.835747 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.835777 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.835794 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:50Z","lastTransitionTime":"2025-12-06T05:20:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.939321 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.939377 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.939386 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.939404 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:50 crc kubenswrapper[4706]: I1206 05:20:50.939416 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:50Z","lastTransitionTime":"2025-12-06T05:20:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.035598 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs"
Dec 06 05:20:51 crc kubenswrapper[4706]: E1206 05:20:51.035760 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed"
pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.042123 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.042226 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.042253 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.042280 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.042303 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:51Z","lastTransitionTime":"2025-12-06T05:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.144710 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.144760 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.144769 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.144783 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.144797 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:51Z","lastTransitionTime":"2025-12-06T05:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.248559 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.248628 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.248650 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.248673 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.248691 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:51Z","lastTransitionTime":"2025-12-06T05:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.351335 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.351380 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.351414 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.351432 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.351444 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:51Z","lastTransitionTime":"2025-12-06T05:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.453447 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.453488 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.453497 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.453513 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.453524 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:51Z","lastTransitionTime":"2025-12-06T05:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.558291 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.558351 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.558361 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.558374 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.558384 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:51Z","lastTransitionTime":"2025-12-06T05:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.661502 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.661570 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.661583 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.661609 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.661666 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:51Z","lastTransitionTime":"2025-12-06T05:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.764472 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.764515 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.764523 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.764538 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.764547 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:51Z","lastTransitionTime":"2025-12-06T05:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.867097 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.867126 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.867135 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.867149 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.867158 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:51Z","lastTransitionTime":"2025-12-06T05:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.969831 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.969940 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.969949 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.969990 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:51 crc kubenswrapper[4706]: I1206 05:20:51.970001 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:51Z","lastTransitionTime":"2025-12-06T05:20:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.035717 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.035798 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:52 crc kubenswrapper[4706]: E1206 05:20:52.035860 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.035871 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:52 crc kubenswrapper[4706]: E1206 05:20:52.035955 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:52 crc kubenswrapper[4706]: E1206 05:20:52.035991 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.036695 4706 scope.go:117] "RemoveContainer" containerID="7d53a10e6334cf43d2d8739fc829e0d020eb49cefe9e56294d175303aa3e755d" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.052785 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 
2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.064517 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.073154 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.073189 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.073199 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.073214 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.073225 4706 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:52Z","lastTransitionTime":"2025-12-06T05:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.078080 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\
",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.089834 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.103655 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"sta
rtedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.118166 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.128180 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.139334 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.149557 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.161394 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e33291cd993ce9bf4f530c26ef9173efc5ebe7c14f4d97adf1ca6faecca830a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.172648 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-12-06T05:20:52Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.175580 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.175648 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.175663 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.175684 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.175698 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:52Z","lastTransitionTime":"2025-12-06T05:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.185672 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserv
er-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use 
of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.195801 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://418fbe98ebc093b9ab0d935b32f34d145705cbcde20714629877fcca90111b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac6c557fb022a4bf70551a123d98e7250ed6b425ea31531a32bcccd8bc16a173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 2025-08-24T17:21:41Z" Dec 06 
05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.207452 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.
168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.225532 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\"
:\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servi
ceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7d53a10e6334cf43d2d8739fc829e0d020eb49cefe9e56294d175303aa3e755d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d53a10e6334cf43d2d8739fc829e0d020eb49cefe9e56294d175303aa3e755d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:38Z\\\",\\\"message\\\":\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1206 05:20:37.765932 6203 ovnkube.go:599] Stopped ovnkube\\\\nI1206 05:20:37.766109 6203 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1206 05:20:37.766381 6203 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1206 05:20:37.766383 6203 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1206 05:20:37.766443 6203 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nF1206 05:20:37.766451 6203 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network 
controller: failed to start default node network controller: failed \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-l5xg7_openshift-ovn-kubernetes(a4bbd5a9-5b78-4e07-b4af-e10d4768de95)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":
\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.236375 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0127b51-745e-4842-8bc1-cf7d089d2e0d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.246513 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.282625 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.282661 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.282675 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.282693 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.282705 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:52Z","lastTransitionTime":"2025-12-06T05:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.385875 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.385918 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.385928 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.385944 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.385955 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:52Z","lastTransitionTime":"2025-12-06T05:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.489000 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.489097 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.489109 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.489131 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.489142 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:52Z","lastTransitionTime":"2025-12-06T05:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.593857 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.593902 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.593918 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.593937 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.593955 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:52Z","lastTransitionTime":"2025-12-06T05:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.611201 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.611245 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.611257 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.611273 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.611284 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:52Z","lastTransitionTime":"2025-12-06T05:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:52 crc kubenswrapper[4706]: E1206 05:20:52.629631 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 
2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.633384 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.633417 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.633429 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.633447 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.633459 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:52Z","lastTransitionTime":"2025-12-06T05:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:52 crc kubenswrapper[4706]: E1206 05:20:52.643734 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 
2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.647220 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.647257 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.647269 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.647292 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.647306 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:52Z","lastTransitionTime":"2025-12-06T05:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:52 crc kubenswrapper[4706]: E1206 05:20:52.663781 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 
2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.667800 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.667854 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.667877 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.667906 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.667929 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:52Z","lastTransitionTime":"2025-12-06T05:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:52 crc kubenswrapper[4706]: E1206 05:20:52.679291 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 
2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.683479 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.683522 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.683544 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.683573 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.683594 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:52Z","lastTransitionTime":"2025-12-06T05:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:52 crc kubenswrapper[4706]: E1206 05:20:52.701263 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:52Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:52Z is after 
2025-08-24T17:21:41Z" Dec 06 05:20:52 crc kubenswrapper[4706]: E1206 05:20:52.701626 4706 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.703416 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.703460 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.703476 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.703499 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.703514 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:52Z","lastTransitionTime":"2025-12-06T05:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.806233 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.806283 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.806300 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.806324 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.806340 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:52Z","lastTransitionTime":"2025-12-06T05:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.908529 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.908617 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.908627 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.908643 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:52 crc kubenswrapper[4706]: I1206 05:20:52.908654 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:52Z","lastTransitionTime":"2025-12-06T05:20:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.012121 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.012151 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.012160 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.012174 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.012183 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:53Z","lastTransitionTime":"2025-12-06T05:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.035847 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:53 crc kubenswrapper[4706]: E1206 05:20:53.035986 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.115156 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.115200 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.115212 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.115228 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.115238 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:53Z","lastTransitionTime":"2025-12-06T05:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.218721 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.218765 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.218777 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.218795 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.218808 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:53Z","lastTransitionTime":"2025-12-06T05:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.267504 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs\") pod \"network-metrics-daemon-4ltjs\" (UID: \"f4065785-c72e-4c45-ab51-ce292be4f2ed\") " pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:53 crc kubenswrapper[4706]: E1206 05:20:53.267686 4706 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Dec 06 05:20:53 crc kubenswrapper[4706]: E1206 05:20:53.267757 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs podName:f4065785-c72e-4c45-ab51-ce292be4f2ed nodeName:}" failed. No retries permitted until 2025-12-06 05:21:25.26773614 +0000 UTC m=+107.595560094 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs") pod "network-metrics-daemon-4ltjs" (UID: "f4065785-c72e-4c45-ab51-ce292be4f2ed") : object "openshift-multus"/"metrics-daemon-secret" not registered
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.321940 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.322004 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.322028 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.322098 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.322127 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:53Z","lastTransitionTime":"2025-12-06T05:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.425020 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.425088 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.425102 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.425121 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.425136 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:53Z","lastTransitionTime":"2025-12-06T05:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.527704 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.527781 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.527802 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.527832 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.527853 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:53Z","lastTransitionTime":"2025-12-06T05:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.581285 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovnkube-controller/1.log"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.586425 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerStarted","Data":"771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f"}
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.631458 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.631506 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.631522 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.631544 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.631558 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:53Z","lastTransitionTime":"2025-12-06T05:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.734193 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.734482 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.734579 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.734744 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.734887 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:53Z","lastTransitionTime":"2025-12-06T05:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.837491 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.837520 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.837530 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.837546 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.837557 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:53Z","lastTransitionTime":"2025-12-06T05:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.939898 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.939926 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.939935 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.939952 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:53 crc kubenswrapper[4706]: I1206 05:20:53.939963 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:53Z","lastTransitionTime":"2025-12-06T05:20:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.035523 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.035594 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.035586 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 06 05:20:54 crc kubenswrapper[4706]: E1206 05:20:54.035666 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 06 05:20:54 crc kubenswrapper[4706]: E1206 05:20:54.035887 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 06 05:20:54 crc kubenswrapper[4706]: E1206 05:20:54.036018 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.041736 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.041782 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.041808 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.041846 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.041857 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:54Z","lastTransitionTime":"2025-12-06T05:20:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.144681 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.144727 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.144739 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.144755 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.144764 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:54Z","lastTransitionTime":"2025-12-06T05:20:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.247165 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.247215 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.247227 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.247248 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.247259 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:54Z","lastTransitionTime":"2025-12-06T05:20:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.350475 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.350519 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.350532 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.350550 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.350562 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:54Z","lastTransitionTime":"2025-12-06T05:20:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.453472 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.453512 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.453520 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.453533 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.453542 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:54Z","lastTransitionTime":"2025-12-06T05:20:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.555969 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.556002 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.556012 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.556025 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.556033 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:54Z","lastTransitionTime":"2025-12-06T05:20:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.589453 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.610651 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e33291cd993ce9bf4f530c26ef9173efc5ebe7c14f4d97adf1ca6faecca830a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-a
ccess-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"
reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.621629 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.636233 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.655149 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.658273 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.658334 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.658355 4706 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.658383 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.658402 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:54Z","lastTransitionTime":"2025-12-06T05:20:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.669444 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.684464 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.702602 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.720025 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://418fbe98ebc093b9ab0d935b32f34d145705cbcde20714629877fcca90111b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac6c557fb022a4bf70551a123d98e7250ed6b425ea31531a32bcccd8bc16a173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z" Dec 06 
05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.734631 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0127b51-745e-4842-8bc1-cf7d089d2e0d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.753957 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.761396 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.761475 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.761505 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.761537 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.761558 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:54Z","lastTransitionTime":"2025-12-06T05:20:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.775776 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.807223 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d53a10e6334cf43d2d8739fc829e0d020eb49cefe9e56294d175303aa3e755d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:38Z\\\",\\\"message\\\":\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1206 05:20:37.765932 6203 ovnkube.go:599] Stopped ovnkube\\\\nI1206 05:20:37.766109 6203 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1206 05:20:37.766381 6203 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1206 05:20:37.766383 6203 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1206 05:20:37.766443 6203 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nF1206 05:20:37.766451 6203 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped 
already, failed to start node network controller: failed to start default node network controller: failed \\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.828484 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.848937 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.864308 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.864386 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.864405 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.864432 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.864449 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:54Z","lastTransitionTime":"2025-12-06T05:20:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.868997 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.885185 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\"
,\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.900349 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.967853 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.967898 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.967909 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.967925 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:54 crc kubenswrapper[4706]: I1206 05:20:54.967936 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:54Z","lastTransitionTime":"2025-12-06T05:20:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.035177 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:55 crc kubenswrapper[4706]: E1206 05:20:55.035400 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.070881 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.070946 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.070965 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.070990 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.071010 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:55Z","lastTransitionTime":"2025-12-06T05:20:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.174142 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.174247 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.174315 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.174350 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.174375 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:55Z","lastTransitionTime":"2025-12-06T05:20:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.277307 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.277404 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.277429 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.277461 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.277490 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:55Z","lastTransitionTime":"2025-12-06T05:20:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.381368 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.381439 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.381470 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.381503 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.381531 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:55Z","lastTransitionTime":"2025-12-06T05:20:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.484177 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.484237 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.484254 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.484278 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.484295 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:55Z","lastTransitionTime":"2025-12-06T05:20:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.587729 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.587818 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.587843 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.587876 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.587895 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:55Z","lastTransitionTime":"2025-12-06T05:20:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.594155 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rtxrp_f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5/kube-multus/0.log" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.594214 4706 generic.go:334] "Generic (PLEG): container finished" podID="f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5" containerID="c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920" exitCode=1 Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.594287 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rtxrp" event={"ID":"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5","Type":"ContainerDied","Data":"c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920"} Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.594819 4706 scope.go:117] "RemoveContainer" containerID="c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.597607 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovnkube-controller/2.log" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.599740 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovnkube-controller/1.log" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.606940 4706 generic.go:334] "Generic (PLEG): container finished" podID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerID="771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f" exitCode=1 Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.607020 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerDied","Data":"771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f"} Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.607118 4706 scope.go:117] "RemoveContainer" containerID="7d53a10e6334cf43d2d8739fc829e0d020eb49cefe9e56294d175303aa3e755d" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.608483 4706 scope.go:117] "RemoveContainer" containerID="771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f" Dec 06 05:20:55 crc kubenswrapper[4706]: E1206 05:20:55.608897 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-l5xg7_openshift-ovn-kubernetes(a4bbd5a9-5b78-4e07-b4af-e10d4768de95)\"" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.617999 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:55Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.643212 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:55Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.661817 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:55Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.681902 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:55Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.695528 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.695936 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.695956 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.695984 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.696003 4706 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:55Z","lastTransitionTime":"2025-12-06T05:20:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.708436 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":
\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:55Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.730189 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:55Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.754943 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e33291cd993ce9bf4f530c26ef9173efc5ebe7c14f4d97adf1ca6faecca830a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:55Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.771588 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-12-06T05:20:55Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.792986 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\
\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:55Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.799242 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.799325 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.799347 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.799378 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.799400 4706 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:55Z","lastTransitionTime":"2025-12-06T05:20:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.813829 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:55Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.829505 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:55Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.879348 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:55Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.899874 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://418fbe98ebc093b9ab0d935b32f34d145705cbcde20714629877fcca90111b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\
\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac6c557fb022a4bf70551a123d98e7250ed6b425ea31531a32bcccd8bc16a173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:55Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.902462 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.902525 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.902552 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.902617 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.902645 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:55Z","lastTransitionTime":"2025-12-06T05:20:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.922640 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0127b51-745e-4842-8bc1-cf7d089d2e0d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:55Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.945510 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:55Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:55 crc kubenswrapper[4706]: I1206 05:20:55.972694 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:54Z\\\",\\\"message\\\":\\\"2025-12-06T05:20:09+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_f74b837f-07db-4a6b-a740-30a144ac39f2\\\\n2025-12-06T05:20:09+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_f74b837f-07db-4a6b-a740-30a144ac39f2 to /host/opt/cni/bin/\\\\n2025-12-06T05:20:09Z [verbose] multus-daemon started\\\\n2025-12-06T05:20:09Z [verbose] Readiness Indicator file check\\\\n2025-12-06T05:20:54Z [error] have you checked that your 
default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:55Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.006475 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.006543 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.006562 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.006595 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.006620 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:56Z","lastTransitionTime":"2025-12-06T05:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false 
reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.015077 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d53a10e6334cf43d2d8739fc829e0d020eb49cefe9e56294d175303aa3e755d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:38Z\\\",\\\"message\\\":\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1206 05:20:37.765932 6203 ovnkube.go:599] Stopped ovnkube\\\\nI1206 05:20:37.766109 6203 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1206 05:20:37.766381 6203 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1206 05:20:37.766383 6203 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1206 05:20:37.766443 6203 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nF1206 05:20:37.766451 6203 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"conta
inerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.035282 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.035330 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.035427 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:20:56 crc kubenswrapper[4706]: E1206 05:20:56.035614 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:20:56 crc kubenswrapper[4706]: E1206 05:20:56.036097 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:20:56 crc kubenswrapper[4706]: E1206 05:20:56.036449 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.050365 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-
api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7d53a10e6334cf43d2d8739fc829e0d020eb49cefe9e56294d175303aa3e755d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:38Z\\\",\\\"message\\\":\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1206 05:20:37.765932 6203 ovnkube.go:599] Stopped ovnkube\\\\nI1206 05:20:37.766109 6203 obj_retry.go:365] Adding new object: *v1.Pod openshift-network-diagnostics/network-check-target-xd92c\\\\nI1206 05:20:37.766381 6203 ovn.go:134] Ensuring zone local for Pod openshift-network-diagnostics/network-check-target-xd92c in node crc\\\\nI1206 05:20:37.766383 6203 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1206 05:20:37.766443 6203 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-target-xd92c] creating logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nF1206 05:20:37.766451 6203 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed 
\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:54Z\\\",\\\"message\\\":\\\"logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nF1206 05:20:54.267115 6454 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z]\\\\nI1206 05:20:54.267107 6454 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, 
E\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:53Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.073959 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0127b51-745e-4842-8bc1-cf7d089d2e0d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d8
8c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.097327 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.109620 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.109867 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.110031 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.110320 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.110512 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:56Z","lastTransitionTime":"2025-12-06T05:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.120189 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:54Z\\\",\\\"message\\\":\\\"2025-12-06T05:20:09+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_f74b837f-07db-4a6b-a740-30a144ac39f2\\\\n2025-12-06T05:20:09+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_f74b837f-07db-4a6b-a740-30a144ac39f2 to /host/opt/cni/bin/\\\\n2025-12-06T05:20:09Z [verbose] multus-daemon started\\\\n2025-12-06T05:20:09Z [verbose] Readiness Indicator file check\\\\n2025-12-06T05:20:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.137798 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.160906 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.178432 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.194868 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.210763 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.213996 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.214160 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.214177 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.214197 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.214210 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:56Z","lastTransitionTime":"2025-12-06T05:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.226925 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.249973 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.267261 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.286682 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e33291cd993ce9bf4f530c26ef9173efc5ebe7c14f4d97adf1ca6faecca830a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.300784 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.318739 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.318815 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.318838 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.318870 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.318890 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:56Z","lastTransitionTime":"2025-12-06T05:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.319689 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.343154 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.358253 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://418fbe98ebc093b9ab0d935b32f34d145705cbcde20714629877fcca90111b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac6c557fb022a4bf70551a123d98e7250ed6b425ea31531a32bcccd8bc16a173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 
05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.421570 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.421633 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.421657 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.421689 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.421713 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:56Z","lastTransitionTime":"2025-12-06T05:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.524384 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.524456 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.524473 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.524494 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.524509 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:56Z","lastTransitionTime":"2025-12-06T05:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.613544 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovnkube-controller/2.log" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.619023 4706 scope.go:117] "RemoveContainer" containerID="771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f" Dec 06 05:20:56 crc kubenswrapper[4706]: E1206 05:20:56.619409 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-l5xg7_openshift-ovn-kubernetes(a4bbd5a9-5b78-4e07-b4af-e10d4768de95)\"" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.627849 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.627885 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.627896 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.627911 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.627924 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:56Z","lastTransitionTime":"2025-12-06T05:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.637944 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.657987 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.676760 4706 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e50
38bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.700220 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.716253 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.732098 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.732154 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.732173 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.732202 4706 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeNotReady" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.732220 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:56Z","lastTransitionTime":"2025-12-06T05:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.736834 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.758540 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.793268 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e33291cd993ce9bf4f530c26ef9173efc5ebe7c14f4d97adf1ca6faecca830a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.826295 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.835178 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.835244 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.835266 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.835293 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.835309 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:56Z","lastTransitionTime":"2025-12-06T05:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.855382 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserv
er-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use 
of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.874998 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.890210 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.908078 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://418fbe98ebc093b9ab0d935b32f34d145705cbcde20714629877fcca90111b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac6c557fb022a4bf70551a123d98e7250ed6b425ea31531a32bcccd8bc16a173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 
05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.927353 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0127b51-745e-4842-8bc1-cf7d089d2e0d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.939882 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.939928 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.939938 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.939958 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.939970 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:56Z","lastTransitionTime":"2025-12-06T05:20:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.944212 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.962859 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:55Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:55Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:54Z\\\",\\\"message\\\":\\\"2025-12-06T05:20:09+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_f74b837f-07db-4a6b-a740-30a144ac39f2\\\\n2025-12-06T05:20:09+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_f74b837f-07db-4a6b-a740-30a144ac39f2 to /host/opt/cni/bin/\\\\n2025-12-06T05:20:09Z [verbose] multus-daemon started\\\\n2025-12-06T05:20:09Z [verbose] Readiness Indicator file check\\\\n2025-12-06T05:20:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the 
condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:56 crc kubenswrapper[4706]: I1206 05:20:56.986977 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:54Z\\\",\\\"message\\\":\\\"logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nF1206 05:20:54.267115 6454 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z]\\\\nI1206 05:20:54.267107 6454 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, E\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-l5xg7_openshift-ovn-kubernetes(a4bbd5a9-5b78-4e07-b4af-e10d4768de95)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:56Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.036155 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:20:57 crc kubenswrapper[4706]: E1206 05:20:57.036355 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.042939 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.042993 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.043007 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.043024 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.043033 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:57Z","lastTransitionTime":"2025-12-06T05:20:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.146221 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.146259 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.146268 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.146283 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.146293 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:57Z","lastTransitionTime":"2025-12-06T05:20:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.249962 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.250005 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.250014 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.250030 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.250039 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:57Z","lastTransitionTime":"2025-12-06T05:20:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.353726 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.353794 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.353811 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.353844 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.353865 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:57Z","lastTransitionTime":"2025-12-06T05:20:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.456467 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.456526 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.456541 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.456565 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.456581 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:57Z","lastTransitionTime":"2025-12-06T05:20:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.559229 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.559284 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.559296 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.559314 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.559327 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:57Z","lastTransitionTime":"2025-12-06T05:20:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.622810 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rtxrp_f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5/kube-multus/0.log" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.622866 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rtxrp" event={"ID":"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5","Type":"ContainerStarted","Data":"3dedd7c8354756f4eba54307bbb72a153b9c4b8f01bbd97fce12423fd16f3aaf"} Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.637628 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:57Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.660950 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dedd7c8354756f4eba54307bbb72a153b9c4b8f01bbd97fce12423fd16f3aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:54Z\\\",\\\"message\\\":\\\"2025-12-06T05:20:09+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_f74b837f-07db-4a6b-a740-30a144ac39f2\\\\n2025-12-06T05:20:09+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_f74b837f-07db-4a6b-a740-30a144ac39f2 to /host/opt/cni/bin/\\\\n2025-12-06T05:20:09Z [verbose] multus-daemon started\\\\n2025-12-06T05:20:09Z [verbose] Readiness Indicator file check\\\\n2025-12-06T05:20:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:57Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.662501 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.662577 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.662600 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.662630 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.662650 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:57Z","lastTransitionTime":"2025-12-06T05:20:57Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.695023 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/s
ecrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:54Z\\\",\\\"message\\\":\\\"logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nF1206 05:20:54.267115 6454 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z]\\\\nI1206 05:20:54.267107 6454 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, 
E\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-l5xg7_openshift-ovn-kubernetes(a4bbd5a9-5b78-4e07-b4af-e10d4768de95)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveRe
adOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:57Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.712603 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0127b51-745e-4842-8bc1-cf7d089d2e0d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:57Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.732774 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"
lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:57Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.754424 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:57Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.766134 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.766181 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.766193 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.766210 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.766222 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:57Z","lastTransitionTime":"2025-12-06T05:20:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.771308 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:57Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.789226 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:57Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.806715 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:57Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.833278 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:57Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.851108 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:57Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.869603 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.869688 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.869708 4706 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.869730 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.869746 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:57Z","lastTransitionTime":"2025-12-06T05:20:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.872688 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:57Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.894147 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:57Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.915484 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:57Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.935776 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e33291cd993ce9bf4f530c26ef9173efc5ebe7c14f4d97adf1ca6faecca830a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:57Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.949940 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:57Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.968032 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://418fbe98ebc093b9ab0d935b32f34d145705cbcde20714629877fcca90111b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac6c557fb022a4bf70551a123d98e7250ed6b425ea31531a32bcccd8bc16a173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:57Z is after 2025-08-24T17:21:41Z" Dec 06 
05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.972660    4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.972724    4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.972741    4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.972765    4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:57 crc kubenswrapper[4706]: I1206 05:20:57.972781    4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:57Z","lastTransitionTime":"2025-12-06T05:20:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.035547    4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.035559    4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 06 05:20:58 crc kubenswrapper[4706]: E1206 05:20:58.035748    4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.035576    4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 06 05:20:58 crc kubenswrapper[4706]: E1206 05:20:58.035863    4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 06 05:20:58 crc kubenswrapper[4706]: E1206 05:20:58.036147    4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.058563 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/st
atic-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:58Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.075462 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.075569 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.075589 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.075616 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.075636 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:58Z","lastTransitionTime":"2025-12-06T05:20:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.080764 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:58Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.095392 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:58Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.112109 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:58Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.136509 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:58Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.157151 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:58Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.171533 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:58Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.178537 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.178579 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.178594 4706 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.178617 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.178634 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:58Z","lastTransitionTime":"2025-12-06T05:20:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.184775 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:58Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.206161 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:58Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.227924 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:58Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.240929 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e33291cd993ce9bf4f530c26ef9173efc5ebe7c14f4d97adf1ca6faecca830a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:58Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.254352 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:58Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.271914 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://418fbe98ebc093b9ab0d935b32f34d145705cbcde20714629877fcca90111b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac6c557fb022a4bf70551a123d98e7250ed6b425ea31531a32bcccd8bc16a173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:58Z is after 2025-08-24T17:21:41Z" Dec 06 
05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.281239 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.281285 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.281301 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.281324 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.281339 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:58Z","lastTransitionTime":"2025-12-06T05:20:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.288890 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0127b51-745e-4842-8bc1-cf7d089d2e0d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"
restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:58Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.310004 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:58Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.328169 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dedd7c8354756f4eba54307bbb72a153b9c4b8f01bbd97fce12423fd16f3aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:54Z\\\",\\\"message\\\":\\\"2025-12-06T05:20:09+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to 
/host/opt/cni/bin/upgrade_f74b837f-07db-4a6b-a740-30a144ac39f2\\\\n2025-12-06T05:20:09+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_f74b837f-07db-4a6b-a740-30a144ac39f2 to /host/opt/cni/bin/\\\\n2025-12-06T05:20:09Z [verbose] multus-daemon started\\\\n2025-12-06T05:20:09Z [verbose] Readiness Indicator file check\\\\n2025-12-06T05:20:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:58Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.347581 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:54Z\\\",\\\"message\\\":\\\"logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nF1206 05:20:54.267115 6454 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z]\\\\nI1206 05:20:54.267107 6454 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, E\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-l5xg7_openshift-ovn-kubernetes(a4bbd5a9-5b78-4e07-b4af-e10d4768de95)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:58Z is after 2025-08-24T17:21:41Z" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.385221 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.385281 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.385298 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.385412 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.385471 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:58Z","lastTransitionTime":"2025-12-06T05:20:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.488911 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.488959 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.488969 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.488986 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.488996 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:58Z","lastTransitionTime":"2025-12-06T05:20:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.592755 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.592843 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.592866 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.592899 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.592924 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:58Z","lastTransitionTime":"2025-12-06T05:20:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.696285 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.696345 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.696364 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.696389 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.696408 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:58Z","lastTransitionTime":"2025-12-06T05:20:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.800275 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.800328 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.800340 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.800357 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.800368 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:58Z","lastTransitionTime":"2025-12-06T05:20:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.902992 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.903032 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.903040 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.903074 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:58 crc kubenswrapper[4706]: I1206 05:20:58.903083 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:58Z","lastTransitionTime":"2025-12-06T05:20:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.005860 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.005895 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.005905 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.005920 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.005929 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:59Z","lastTransitionTime":"2025-12-06T05:20:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.035363 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs"
Dec 06 05:20:59 crc kubenswrapper[4706]: E1206 05:20:59.035523 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.108922 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.108994 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.109015 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.109040 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.109074 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:59Z","lastTransitionTime":"2025-12-06T05:20:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.212463 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.212517 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.212530 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.212547 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.212557 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:59Z","lastTransitionTime":"2025-12-06T05:20:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.315387 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.315435 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.315447 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.315469 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.315483 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:59Z","lastTransitionTime":"2025-12-06T05:20:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.418502 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.418581 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.418605 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.418635 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.418655 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:59Z","lastTransitionTime":"2025-12-06T05:20:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.522224 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.522283 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.522299 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.522319 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.522328 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:59Z","lastTransitionTime":"2025-12-06T05:20:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.626377 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.626474 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.626491 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.626522 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.626544 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:59Z","lastTransitionTime":"2025-12-06T05:20:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.730312 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.730390 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.730408 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.730438 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.730460 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:59Z","lastTransitionTime":"2025-12-06T05:20:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.833082 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.833138 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.833150 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.833167 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.833179 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:59Z","lastTransitionTime":"2025-12-06T05:20:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.936389 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.936440 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.936450 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.936466 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:20:59 crc kubenswrapper[4706]: I1206 05:20:59.936482 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:20:59Z","lastTransitionTime":"2025-12-06T05:20:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.035774 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.035862 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 06 05:21:00 crc kubenswrapper[4706]: E1206 05:21:00.035928 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.035862 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 06 05:21:00 crc kubenswrapper[4706]: E1206 05:21:00.036090 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 06 05:21:00 crc kubenswrapper[4706]: E1206 05:21:00.036178 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.039003 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.039034 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.039061 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.039079 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.039090 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:00Z","lastTransitionTime":"2025-12-06T05:21:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.142003 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.142040 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.142079 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.142098 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.142111 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:00Z","lastTransitionTime":"2025-12-06T05:21:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.248650 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.248695 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.248745 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.248768 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.248788 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:00Z","lastTransitionTime":"2025-12-06T05:21:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.351938 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.352001 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.352019 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.352038 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.352075 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:00Z","lastTransitionTime":"2025-12-06T05:21:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.454540 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.454571 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.454579 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.454593 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.454602 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:00Z","lastTransitionTime":"2025-12-06T05:21:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.556865 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.556906 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.556917 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.556933 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.556942 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:00Z","lastTransitionTime":"2025-12-06T05:21:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.659496 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.659551 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.659565 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.659583 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.659595 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:00Z","lastTransitionTime":"2025-12-06T05:21:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.762471 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.762515 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.762526 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.762543 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.762553 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:00Z","lastTransitionTime":"2025-12-06T05:21:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.865585 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.865676 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.865695 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.865724 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.865743 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:00Z","lastTransitionTime":"2025-12-06T05:21:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.969342 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.969426 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.969450 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.969482 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:00 crc kubenswrapper[4706]: I1206 05:21:00.969506 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:00Z","lastTransitionTime":"2025-12-06T05:21:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.035166 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs"
Dec 06 05:21:01 crc kubenswrapper[4706]: E1206 05:21:01.035353 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.071785 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.071830 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.071842 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.071861 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.071875 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:01Z","lastTransitionTime":"2025-12-06T05:21:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.176330 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.176431 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.176459 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.176501 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.176524 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:01Z","lastTransitionTime":"2025-12-06T05:21:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.280114 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.280163 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.280178 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.280199 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.280213 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:01Z","lastTransitionTime":"2025-12-06T05:21:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.383455 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.383521 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.383537 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.383566 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.383584 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:01Z","lastTransitionTime":"2025-12-06T05:21:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.489090 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.489143 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.489155 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.489177 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.489189 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:01Z","lastTransitionTime":"2025-12-06T05:21:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.592926 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.592992 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.593017 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.593079 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.593104 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:01Z","lastTransitionTime":"2025-12-06T05:21:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.696014 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.696100 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.696114 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.696135 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.696147 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:01Z","lastTransitionTime":"2025-12-06T05:21:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.799262 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.799311 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.799323 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.799347 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.799360 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:01Z","lastTransitionTime":"2025-12-06T05:21:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.902643 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.902702 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.902722 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.902746 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:01 crc kubenswrapper[4706]: I1206 05:21:01.902763 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:01Z","lastTransitionTime":"2025-12-06T05:21:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.005426 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.005477 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.005499 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.005522 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.005536 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:02Z","lastTransitionTime":"2025-12-06T05:21:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.036197 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.036269 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.036212 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 06 05:21:02 crc kubenswrapper[4706]: E1206 05:21:02.036408 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 06 05:21:02 crc kubenswrapper[4706]: E1206 05:21:02.036550 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 06 05:21:02 crc kubenswrapper[4706]: E1206 05:21:02.036622 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.107687 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.107725 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.107734 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.107748 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.107758 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:02Z","lastTransitionTime":"2025-12-06T05:21:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.210606 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.210641 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.210650 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.210666 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.210676 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:02Z","lastTransitionTime":"2025-12-06T05:21:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.313491 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.313533 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.313547 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.313591 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.313602 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:02Z","lastTransitionTime":"2025-12-06T05:21:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.415742 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.415794 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.415805 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.415823 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.415834 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:02Z","lastTransitionTime":"2025-12-06T05:21:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.518848 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.518901 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.518917 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.518942 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.518957 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:02Z","lastTransitionTime":"2025-12-06T05:21:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.621405 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.621457 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.621473 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.621496 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.621512 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:02Z","lastTransitionTime":"2025-12-06T05:21:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.724761 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.724861 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.724886 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.724914 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.724939 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:02Z","lastTransitionTime":"2025-12-06T05:21:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.771953 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.772036 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.772094 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.772126 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.772150 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:02Z","lastTransitionTime":"2025-12-06T05:21:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:02 crc kubenswrapper[4706]: E1206 05:21:02.789944 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:02Z is after 2025-08-24T17:21:41Z"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.794599 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.794636 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.794647 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.794665 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.794677 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:02Z","lastTransitionTime":"2025-12-06T05:21:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:02 crc kubenswrapper[4706]: E1206 05:21:02.814784 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:02Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.818923 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.818955 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.818968 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.818986 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.818999 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:02Z","lastTransitionTime":"2025-12-06T05:21:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:02 crc kubenswrapper[4706]: E1206 05:21:02.841879 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:02Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.848252 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.848345 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.848369 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.848399 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.848421 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:02Z","lastTransitionTime":"2025-12-06T05:21:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:02 crc kubenswrapper[4706]: E1206 05:21:02.871325 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:02Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.876916 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.876974 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.876988 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.877012 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.877026 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:02Z","lastTransitionTime":"2025-12-06T05:21:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:02 crc kubenswrapper[4706]: E1206 05:21:02.905330 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:02Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:02Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:02 crc kubenswrapper[4706]: E1206 05:21:02.905515 4706 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.907541 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.907611 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.907626 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.907743 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:02 crc kubenswrapper[4706]: I1206 05:21:02.907831 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:02Z","lastTransitionTime":"2025-12-06T05:21:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.011805 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.011857 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.011867 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.011886 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.011900 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:03Z","lastTransitionTime":"2025-12-06T05:21:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.035376 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:03 crc kubenswrapper[4706]: E1206 05:21:03.035567 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.114901 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.114960 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.114972 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.114997 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.115011 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:03Z","lastTransitionTime":"2025-12-06T05:21:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.219484 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.219565 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.219584 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.219611 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.219632 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:03Z","lastTransitionTime":"2025-12-06T05:21:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.323117 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.323204 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.323226 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.323256 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.323275 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:03Z","lastTransitionTime":"2025-12-06T05:21:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.427513 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.427571 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.427588 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.427616 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.427634 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:03Z","lastTransitionTime":"2025-12-06T05:21:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.530517 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.530597 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.530614 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.530645 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.530670 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:03Z","lastTransitionTime":"2025-12-06T05:21:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.634098 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.634174 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.634196 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.634226 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.634249 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:03Z","lastTransitionTime":"2025-12-06T05:21:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.737507 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.737599 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.737635 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.737669 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.737697 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:03Z","lastTransitionTime":"2025-12-06T05:21:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.840608 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.840660 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.840677 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.840701 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.840718 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:03Z","lastTransitionTime":"2025-12-06T05:21:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.944713 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.944795 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.944817 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.944845 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:03 crc kubenswrapper[4706]: I1206 05:21:03.944863 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:03Z","lastTransitionTime":"2025-12-06T05:21:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.035568 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.035584 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.035661 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:04 crc kubenswrapper[4706]: E1206 05:21:04.036625 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:04 crc kubenswrapper[4706]: E1206 05:21:04.036778 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:04 crc kubenswrapper[4706]: E1206 05:21:04.036873 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.047579 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.047635 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.047653 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.047675 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.047693 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:04Z","lastTransitionTime":"2025-12-06T05:21:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.151556 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.151632 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.151650 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.151680 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.151700 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:04Z","lastTransitionTime":"2025-12-06T05:21:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.257327 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.257401 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.257417 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.257442 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.258100 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:04Z","lastTransitionTime":"2025-12-06T05:21:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.361748 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.361797 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.361815 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.361838 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.361856 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:04Z","lastTransitionTime":"2025-12-06T05:21:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.466020 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.466115 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.466136 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.466161 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.466178 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:04Z","lastTransitionTime":"2025-12-06T05:21:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.570164 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.570258 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.570275 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.570338 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.570360 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:04Z","lastTransitionTime":"2025-12-06T05:21:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.674398 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.674470 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.674489 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.674518 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.674538 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:04Z","lastTransitionTime":"2025-12-06T05:21:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.776847 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.776902 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.776913 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.776931 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.776948 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:04Z","lastTransitionTime":"2025-12-06T05:21:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.880519 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.880607 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.880626 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.880657 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.880675 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:04Z","lastTransitionTime":"2025-12-06T05:21:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.984394 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.984443 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.984453 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.984468 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:04 crc kubenswrapper[4706]: I1206 05:21:04.984479 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:04Z","lastTransitionTime":"2025-12-06T05:21:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.035759 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:05 crc kubenswrapper[4706]: E1206 05:21:05.036092 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.088262 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.088327 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.088350 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.088383 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.088405 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:05Z","lastTransitionTime":"2025-12-06T05:21:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.191805 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.191871 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.191892 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.191918 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.191936 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:05Z","lastTransitionTime":"2025-12-06T05:21:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.296144 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.296228 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.296249 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.296282 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.296310 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:05Z","lastTransitionTime":"2025-12-06T05:21:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.399135 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.399214 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.399229 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.399251 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.399267 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:05Z","lastTransitionTime":"2025-12-06T05:21:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.502416 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.502455 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.502463 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.502477 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.502486 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:05Z","lastTransitionTime":"2025-12-06T05:21:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.605409 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.605473 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.605488 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.605512 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.605529 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:05Z","lastTransitionTime":"2025-12-06T05:21:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.708456 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.708504 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.708513 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.708564 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.708576 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:05Z","lastTransitionTime":"2025-12-06T05:21:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.812236 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.812310 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.812328 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.812352 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.812374 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:05Z","lastTransitionTime":"2025-12-06T05:21:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.915303 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.915363 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.915379 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.915403 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:05 crc kubenswrapper[4706]: I1206 05:21:05.915420 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:05Z","lastTransitionTime":"2025-12-06T05:21:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.017920 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.017977 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.017990 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.018009 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.018022 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:06Z","lastTransitionTime":"2025-12-06T05:21:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.035556 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.035635 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:06 crc kubenswrapper[4706]: E1206 05:21:06.035700 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:06 crc kubenswrapper[4706]: E1206 05:21:06.035794 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.035560 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:06 crc kubenswrapper[4706]: E1206 05:21:06.035950 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.121614 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.121676 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.121697 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.121724 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.121741 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:06Z","lastTransitionTime":"2025-12-06T05:21:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.225381 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.225440 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.225459 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.225483 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.225496 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:06Z","lastTransitionTime":"2025-12-06T05:21:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.328318 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.328363 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.328374 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.328390 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.328404 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:06Z","lastTransitionTime":"2025-12-06T05:21:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.431985 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.432067 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.432083 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.432105 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.432119 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:06Z","lastTransitionTime":"2025-12-06T05:21:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.535141 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.535220 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.535245 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.535279 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.535308 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:06Z","lastTransitionTime":"2025-12-06T05:21:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.639304 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.639452 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.639477 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.639505 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.639524 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:06Z","lastTransitionTime":"2025-12-06T05:21:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.743676 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.743749 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.743769 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.743797 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.743817 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:06Z","lastTransitionTime":"2025-12-06T05:21:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.852654 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.852836 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.852892 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.852978 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.853025 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:06Z","lastTransitionTime":"2025-12-06T05:21:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.957523 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.957604 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.957626 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.957653 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:06 crc kubenswrapper[4706]: I1206 05:21:06.957673 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:06Z","lastTransitionTime":"2025-12-06T05:21:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.035248 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:07 crc kubenswrapper[4706]: E1206 05:21:07.035933 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.069401 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.071890 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.071980 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.072007 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.072101 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.072149 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:07Z","lastTransitionTime":"2025-12-06T05:21:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.176717 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.176859 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.176880 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.176909 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.176929 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:07Z","lastTransitionTime":"2025-12-06T05:21:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.280537 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.280606 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.280624 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.280651 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.280669 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:07Z","lastTransitionTime":"2025-12-06T05:21:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.383715 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.383804 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.383819 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.383838 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.383849 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:07Z","lastTransitionTime":"2025-12-06T05:21:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.495690 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.496129 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.496303 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.496450 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.496585 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:07Z","lastTransitionTime":"2025-12-06T05:21:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.599977 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.600502 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.600703 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.600910 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.601163 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:07Z","lastTransitionTime":"2025-12-06T05:21:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.703581 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.703629 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.703643 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.703661 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.703673 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:07Z","lastTransitionTime":"2025-12-06T05:21:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.807148 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.807200 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.807211 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.807232 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.807244 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:07Z","lastTransitionTime":"2025-12-06T05:21:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.910753 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.910813 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.910829 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.910853 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:07 crc kubenswrapper[4706]: I1206 05:21:07.910870 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:07Z","lastTransitionTime":"2025-12-06T05:21:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.013317 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.013387 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.013408 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.013433 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.013451 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:08Z","lastTransitionTime":"2025-12-06T05:21:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.035582 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.035625 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.035652 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:08 crc kubenswrapper[4706]: E1206 05:21:08.036322 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:08 crc kubenswrapper[4706]: E1206 05:21:08.036456 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:08 crc kubenswrapper[4706]: E1206 05:21:08.036660 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.060387 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb05183d-ba2e-489b-a6f1-36a898bdb6c9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441852619f2ec7f88f40ffeaae94403e82554da0fd0bce732f61ec414a5243f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d15849519cedb4b18f33de96a1bfa7f615f304df9215dc973029423c19689eaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f10c0032009486ff4d0e728b718ffe298807cee09d3cb8d39cee8795bc927a6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://865e45db3642879f721e641001b0ca8446a1f03ae8a51e0fe361028752bb6178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://275f05f6fac1cdcc29b6596af9c5ede5072c3662cf386ce888edb90dfac1241c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a68ddef9ca426d06117a8b2a90019998d113c808c085cd561d28123d3b1d440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"term
inated\\\":{\\\"containerID\\\":\\\"cri-o://6a68ddef9ca426d06117a8b2a90019998d113c808c085cd561d28123d3b1d440\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fd6afc9b9e0f1d8381f44207f56f612b868d7b9318208f1ecef860c45788342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6fd6afc9b9e0f1d8381f44207f56f612b868d7b9318208f1ecef860c45788342\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ff3a11d42675f8f6e7b106efd0cc563481d4d9861c0c1060a6e35fd13e439d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ff3a11d42675f8f6e7b106efd0cc563481d4d9861c0c1060a6e35fd13e439d9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.079708 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.100132 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.115814 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.116711 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.116827 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.116897 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.116969 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.117041 4706 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:08Z","lastTransitionTime":"2025-12-06T05:21:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.131884 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":
\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.145506 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.159019 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.174437 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.188033 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.200174 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.211697 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.219474 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.219520 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.219530 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.219546 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.219562 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:08Z","lastTransitionTime":"2025-12-06T05:21:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.222835 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.238408 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e33291cd993ce9bf4f530c26ef9173efc5ebe7c14f4d97adf1ca6faecca830a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.252748 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://418fbe98ebc093b9ab0d935b32f34d145705cbcde20714629877fcca90111b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac6c557fb022a4bf70551a123d98e7250ed6b425ea31531a32bcccd8bc16a173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:08Z is after 2025-08-24T17:21:41Z" Dec 06 
05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.266750 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0127b51-745e-4842-8bc1-cf7d089d2e0d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.
126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.280273 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.292896 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dedd7c8354756f4eba54307bbb72a153b9c4b8f01bbd97fce12423fd16f3aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:54Z\\\",\\\"message\\\":\\\"2025-12-06T05:20:09+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_f74b837f-07db-4a6b-a740-30a144ac39f2\\\\n2025-12-06T05:20:09+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_f74b837f-07db-4a6b-a740-30a144ac39f2 to /host/opt/cni/bin/\\\\n2025-12-06T05:20:09Z [verbose] multus-daemon started\\\\n2025-12-06T05:20:09Z [verbose] Readiness Indicator file check\\\\n2025-12-06T05:20:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.311399 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:54Z\\\",\\\"message\\\":\\\"logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nF1206 05:20:54.267115 6454 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z]\\\\nI1206 05:20:54.267107 6454 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, E\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-l5xg7_openshift-ovn-kubernetes(a4bbd5a9-5b78-4e07-b4af-e10d4768de95)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:08Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.321660 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.321709 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.321717 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.321734 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.321766 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:08Z","lastTransitionTime":"2025-12-06T05:21:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.425373 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.425510 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.425531 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.425559 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.425578 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:08Z","lastTransitionTime":"2025-12-06T05:21:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.530878 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.530939 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.530953 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.530976 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.530990 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:08Z","lastTransitionTime":"2025-12-06T05:21:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.635145 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.635228 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.635250 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.635278 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.635297 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:08Z","lastTransitionTime":"2025-12-06T05:21:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.737788 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.737840 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.737853 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.737874 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.737887 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:08Z","lastTransitionTime":"2025-12-06T05:21:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.841481 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.841536 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.841549 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.841595 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.841611 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:08Z","lastTransitionTime":"2025-12-06T05:21:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.945360 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.945451 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.945471 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.945494 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:08 crc kubenswrapper[4706]: I1206 05:21:08.945511 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:08Z","lastTransitionTime":"2025-12-06T05:21:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.035917 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:09 crc kubenswrapper[4706]: E1206 05:21:09.036224 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.047966 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.048027 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.048036 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.048070 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.048080 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:09Z","lastTransitionTime":"2025-12-06T05:21:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.150653 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.150700 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.150709 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.150726 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.150736 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:09Z","lastTransitionTime":"2025-12-06T05:21:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.254429 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.254501 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.254520 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.254548 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.254567 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:09Z","lastTransitionTime":"2025-12-06T05:21:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.358485 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.358558 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.358579 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.358608 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.358631 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:09Z","lastTransitionTime":"2025-12-06T05:21:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.462471 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.462567 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.463112 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.463207 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.463493 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:09Z","lastTransitionTime":"2025-12-06T05:21:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.568807 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.568886 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.568912 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.568949 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.568977 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:09Z","lastTransitionTime":"2025-12-06T05:21:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.672759 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.672810 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.672825 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.672849 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.672866 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:09Z","lastTransitionTime":"2025-12-06T05:21:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.776464 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.776539 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.776564 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.776598 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.776628 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:09Z","lastTransitionTime":"2025-12-06T05:21:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.880649 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.880715 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.880740 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.880772 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.880795 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:09Z","lastTransitionTime":"2025-12-06T05:21:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.984767 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.984827 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.984845 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.984871 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.984888 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:09Z","lastTransitionTime":"2025-12-06T05:21:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.986375 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:21:09 crc kubenswrapper[4706]: E1206 05:21:09.986531 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:13.986497033 +0000 UTC m=+156.314321007 (durationBeforeRetry 1m4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.986649 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:09 crc kubenswrapper[4706]: I1206 05:21:09.986756 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:09 crc kubenswrapper[4706]: E1206 05:21:09.986874 4706 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Dec 06 05:21:09 crc kubenswrapper[4706]: E1206 05:21:09.986947 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-06 05:22:13.986933465 +0000 UTC m=+156.314757439 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Dec 06 05:21:09 crc kubenswrapper[4706]: E1206 05:21:09.986874 4706 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 06 05:21:09 crc kubenswrapper[4706]: E1206 05:21:09.987085 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-06 05:22:13.987042487 +0000 UTC m=+156.314866461 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.036379 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.036462 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:10 crc kubenswrapper[4706]: E1206 05:21:10.036625 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.036752 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:10 crc kubenswrapper[4706]: E1206 05:21:10.036886 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:10 crc kubenswrapper[4706]: E1206 05:21:10.037017 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.048316 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.087476 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.087561 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:10 crc kubenswrapper[4706]: E1206 05:21:10.087793 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 06 05:21:10 crc kubenswrapper[4706]: E1206 05:21:10.087823 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 06 05:21:10 crc kubenswrapper[4706]: E1206 05:21:10.087844 4706 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:21:10 crc kubenswrapper[4706]: E1206 05:21:10.087861 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 06 05:21:10 crc kubenswrapper[4706]: E1206 05:21:10.087912 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 06 05:21:10 crc kubenswrapper[4706]: E1206 05:21:10.087925 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-06 05:22:14.087900618 +0000 UTC m=+156.415724602 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:21:10 crc kubenswrapper[4706]: E1206 05:21:10.087938 4706 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:21:10 crc kubenswrapper[4706]: E1206 05:21:10.088039 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-06 05:22:14.088007991 +0000 UTC m=+156.415831975 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.089014 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.089129 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.089149 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.089180 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.089200 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:10Z","lastTransitionTime":"2025-12-06T05:21:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.192735 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.192907 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.192931 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.192967 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.192989 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:10Z","lastTransitionTime":"2025-12-06T05:21:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.295928 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.295963 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.295971 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.295986 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.296000 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:10Z","lastTransitionTime":"2025-12-06T05:21:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.398828 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.398885 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.398952 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.398978 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.398992 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:10Z","lastTransitionTime":"2025-12-06T05:21:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.502539 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.502641 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.502668 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.502708 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.502735 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:10Z","lastTransitionTime":"2025-12-06T05:21:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.606620 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.606685 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.606705 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.606736 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.606760 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:10Z","lastTransitionTime":"2025-12-06T05:21:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.711095 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.711167 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.711185 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.711214 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.711234 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:10Z","lastTransitionTime":"2025-12-06T05:21:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.820195 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.820278 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.820312 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.820343 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.820364 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:10Z","lastTransitionTime":"2025-12-06T05:21:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.923985 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.924088 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.924105 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.924129 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:10 crc kubenswrapper[4706]: I1206 05:21:10.924142 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:10Z","lastTransitionTime":"2025-12-06T05:21:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.028343 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.028413 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.028431 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.028460 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.028480 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:11Z","lastTransitionTime":"2025-12-06T05:21:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.036087 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs"
Dec 06 05:21:11 crc kubenswrapper[4706]: E1206 05:21:11.036302 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.037524 4706 scope.go:117] "RemoveContainer" containerID="771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f"
Dec 06 05:21:11 crc kubenswrapper[4706]: E1206 05:21:11.037789 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-l5xg7_openshift-ovn-kubernetes(a4bbd5a9-5b78-4e07-b4af-e10d4768de95)\"" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.132249 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.132320 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.132339 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.132369 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.132389 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:11Z","lastTransitionTime":"2025-12-06T05:21:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.237488 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.237724 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.237823 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.237866 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.237890 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:11Z","lastTransitionTime":"2025-12-06T05:21:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.342585 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.342661 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.342866 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.342889 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.342908 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:11Z","lastTransitionTime":"2025-12-06T05:21:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.446471 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.446572 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.446592 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.446618 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.446636 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:11Z","lastTransitionTime":"2025-12-06T05:21:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.549934 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.550014 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.550032 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.550144 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.550167 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:11Z","lastTransitionTime":"2025-12-06T05:21:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.653706 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.653783 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.653796 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.653818 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.653832 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:11Z","lastTransitionTime":"2025-12-06T05:21:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.756866 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.756995 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.757018 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.757086 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.757108 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:11Z","lastTransitionTime":"2025-12-06T05:21:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.860401 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.860462 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.860476 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.860496 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.860509 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:11Z","lastTransitionTime":"2025-12-06T05:21:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.963761 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.963811 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.963823 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.963840 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:11 crc kubenswrapper[4706]: I1206 05:21:11.963851 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:11Z","lastTransitionTime":"2025-12-06T05:21:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.035536 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.035725 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 06 05:21:12 crc kubenswrapper[4706]: E1206 05:21:12.035904 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.036381 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 06 05:21:12 crc kubenswrapper[4706]: E1206 05:21:12.036971 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 06 05:21:12 crc kubenswrapper[4706]: E1206 05:21:12.037172 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.067742 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.067824 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.067865 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.067901 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.067924 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:12Z","lastTransitionTime":"2025-12-06T05:21:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.172115 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.172194 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.172444 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.172556 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.172588 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:12Z","lastTransitionTime":"2025-12-06T05:21:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.277325 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.277381 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.277393 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.277411 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.277426 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:12Z","lastTransitionTime":"2025-12-06T05:21:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.380789 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.380846 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.380864 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.380921 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.380949 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:12Z","lastTransitionTime":"2025-12-06T05:21:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.485989 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.486037 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.486065 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.486085 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.486095 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:12Z","lastTransitionTime":"2025-12-06T05:21:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.589014 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.589097 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.589113 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.589141 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.589156 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:12Z","lastTransitionTime":"2025-12-06T05:21:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.691639 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.691681 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.691692 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.691711 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.691722 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:12Z","lastTransitionTime":"2025-12-06T05:21:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.794801 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.794884 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.794909 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.794945 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.794968 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:12Z","lastTransitionTime":"2025-12-06T05:21:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.898110 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.898170 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.898180 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.898205 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.898215 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:12Z","lastTransitionTime":"2025-12-06T05:21:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.910941 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.911022 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.911094 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.911131 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.911160 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:12Z","lastTransitionTime":"2025-12-06T05:21:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:12 crc kubenswrapper[4706]: E1206 05:21:12.938579 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:12Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.943836 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.943884 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.943895 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.943911 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.943921 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:12Z","lastTransitionTime":"2025-12-06T05:21:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:12 crc kubenswrapper[4706]: E1206 05:21:12.976097 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:12Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.982708 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.982777 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.982798 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.982829 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:12 crc kubenswrapper[4706]: I1206 05:21:12.982848 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:12Z","lastTransitionTime":"2025-12-06T05:21:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:13 crc kubenswrapper[4706]: E1206 05:21:13.006653 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:13Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.011780 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.011839 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.011858 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.011886 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.011904 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:13Z","lastTransitionTime":"2025-12-06T05:21:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:13 crc kubenswrapper[4706]: E1206 05:21:13.034549 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:13Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.035144 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:13 crc kubenswrapper[4706]: E1206 05:21:13.035330 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.040598 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.040642 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.040653 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.040696 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.040720 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:13Z","lastTransitionTime":"2025-12-06T05:21:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:13 crc kubenswrapper[4706]: E1206 05:21:13.064225 4706 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.066544 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasSufficientMemory" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.066606 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.066620 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.066643 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.066661 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:13Z","lastTransitionTime":"2025-12-06T05:21:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.169943 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.170003 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.170016 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.170038 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.170092 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:13Z","lastTransitionTime":"2025-12-06T05:21:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.272932 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.273005 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.273026 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.273082 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.273103 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:13Z","lastTransitionTime":"2025-12-06T05:21:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.377270 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.377346 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.377373 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.377406 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.377430 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:13Z","lastTransitionTime":"2025-12-06T05:21:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.481896 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.481970 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.481990 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.482020 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.482040 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:13Z","lastTransitionTime":"2025-12-06T05:21:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.585809 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.585967 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.585985 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.586014 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.586037 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:13Z","lastTransitionTime":"2025-12-06T05:21:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.690428 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.690495 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.690512 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.690538 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.690560 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:13Z","lastTransitionTime":"2025-12-06T05:21:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.793592 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.793660 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.793678 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.793704 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.793724 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:13Z","lastTransitionTime":"2025-12-06T05:21:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.897766 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.897835 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.897857 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.897884 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:13 crc kubenswrapper[4706]: I1206 05:21:13.897903 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:13Z","lastTransitionTime":"2025-12-06T05:21:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.000801 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.000859 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.000873 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.000892 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.000905 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:14Z","lastTransitionTime":"2025-12-06T05:21:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.035536 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.035593 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.035537 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:14 crc kubenswrapper[4706]: E1206 05:21:14.035781 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:14 crc kubenswrapper[4706]: E1206 05:21:14.035884 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:14 crc kubenswrapper[4706]: E1206 05:21:14.036167 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.104694 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.104761 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.104780 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.104810 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.104830 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:14Z","lastTransitionTime":"2025-12-06T05:21:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.208028 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.208177 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.208209 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.208243 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.208275 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:14Z","lastTransitionTime":"2025-12-06T05:21:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.311640 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.311719 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.311737 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.311768 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.311787 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:14Z","lastTransitionTime":"2025-12-06T05:21:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.416147 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.416236 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.416263 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.416300 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.416330 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:14Z","lastTransitionTime":"2025-12-06T05:21:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.526674 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.526750 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.526769 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.526797 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.526815 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:14Z","lastTransitionTime":"2025-12-06T05:21:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.630918 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.631012 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.631031 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.631091 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.631112 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:14Z","lastTransitionTime":"2025-12-06T05:21:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.734784 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.734851 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.734869 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.734902 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.734920 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:14Z","lastTransitionTime":"2025-12-06T05:21:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.838645 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.838697 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.838709 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.838729 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.838743 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:14Z","lastTransitionTime":"2025-12-06T05:21:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.942286 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.942360 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.942370 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.942388 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:14 crc kubenswrapper[4706]: I1206 05:21:14.942400 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:14Z","lastTransitionTime":"2025-12-06T05:21:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.035752 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:15 crc kubenswrapper[4706]: E1206 05:21:15.035975 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.045810 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.045872 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.045882 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.045903 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.045915 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:15Z","lastTransitionTime":"2025-12-06T05:21:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.148493 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.148585 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.148603 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.148644 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.148665 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:15Z","lastTransitionTime":"2025-12-06T05:21:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.252611 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.252699 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.252714 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.252740 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.252755 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:15Z","lastTransitionTime":"2025-12-06T05:21:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.356040 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.356155 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.356176 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.356213 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.356234 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:15Z","lastTransitionTime":"2025-12-06T05:21:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.459563 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.459610 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.459690 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.459710 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.459721 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:15Z","lastTransitionTime":"2025-12-06T05:21:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.562608 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.562680 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.562709 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.562727 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.562741 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:15Z","lastTransitionTime":"2025-12-06T05:21:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.665075 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.665173 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.665190 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.665213 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.665227 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:15Z","lastTransitionTime":"2025-12-06T05:21:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.767533 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.767575 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.767586 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.767605 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.767617 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:15Z","lastTransitionTime":"2025-12-06T05:21:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.869968 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.870032 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.870068 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.870091 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.870108 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:15Z","lastTransitionTime":"2025-12-06T05:21:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.971897 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.971943 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.971956 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.971974 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:15 crc kubenswrapper[4706]: I1206 05:21:15.971987 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:15Z","lastTransitionTime":"2025-12-06T05:21:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.035129 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.035207 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.035236 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Dec 06 05:21:16 crc kubenswrapper[4706]: E1206 05:21:16.035296 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Dec 06 05:21:16 crc kubenswrapper[4706]: E1206 05:21:16.035382 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Dec 06 05:21:16 crc kubenswrapper[4706]: E1206 05:21:16.035454 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.075528 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.075570 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.075580 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.075620 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.075633 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:16Z","lastTransitionTime":"2025-12-06T05:21:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
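The records above capture the failure loop in full: the kubelet finds no CNI network configuration under /etc/kubernetes/cni/net.d/, so it reports NetworkReady=false, keeps the node's Ready condition at False, and skips sandbox creation for every pod that needs the pod network (network-check-source, networking-console-plugin, network-check-target, and network-metrics-daemon). A minimal Go sketch of the readiness test implied by these messages follows; the directory path is taken from the log itself, while the networkReady helper and the file patterns are assumptions for illustration, not the kubelet's actual implementation.

// cnicheck.go: a sketch of the "is there any CNI config yet?" probe implied
// by the NetworkPluginNotReady messages above (helper and patterns assumed).
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// confDir is the path reported verbatim in the log records.
const confDir = "/etc/kubernetes/cni/net.d"

// networkReady is a hypothetical helper: it returns true once any plausible
// CNI config file exists in confDir, which is roughly the condition polled
// before NetworkReady can flip to true.
func networkReady() (bool, error) {
	for _, pat := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, err := filepath.Glob(filepath.Join(confDir, pat))
		if err != nil {
			return false, err
		}
		if len(matches) > 0 {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ready, err := networkReady()
	if err != nil {
		fmt.Fprintln(os.Stderr, "check failed:", err)
		os.Exit(1)
	}
	if !ready {
		// Mirrors the condition the kubelet re-records every ~100ms above.
		fmt.Println("NetworkReady=false reason:NetworkPluginNotReady")
		return
	}
	fmt.Println("NetworkReady=true")
}

Until the network plugin writes its configuration into that directory, the loop simply repeats, as the following records show.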
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.179320 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.179377 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.179389 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.179405 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.179415 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:16Z","lastTransitionTime":"2025-12-06T05:21:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.282570 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.282643 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.282655 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.282675 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.282688 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:16Z","lastTransitionTime":"2025-12-06T05:21:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.386196 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.386256 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.386266 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.386285 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.386298 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:16Z","lastTransitionTime":"2025-12-06T05:21:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.490183 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.490260 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.490285 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.490322 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.490349 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:16Z","lastTransitionTime":"2025-12-06T05:21:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.594979 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.595075 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.595101 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.595138 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.595162 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:16Z","lastTransitionTime":"2025-12-06T05:21:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.697509 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.697593 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.697604 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.697628 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.697640 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:16Z","lastTransitionTime":"2025-12-06T05:21:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.801253 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.801411 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.801429 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.801456 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.801474 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:16Z","lastTransitionTime":"2025-12-06T05:21:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.905897 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.905995 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.906023 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.906107 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:16 crc kubenswrapper[4706]: I1206 05:21:16.906138 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:16Z","lastTransitionTime":"2025-12-06T05:21:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.010107 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.010183 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.010201 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.010242 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.010264 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:17Z","lastTransitionTime":"2025-12-06T05:21:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
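Each setters.go:603 record embeds the node's Ready condition as a JSON object. The sketch below decodes one of those payloads using a local stand-in struct that mirrors only the fields visible in the log; it deliberately does not import the upstream Kubernetes API type:

// nodecond.go: decode the condition={...} payload from the records above.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"time"
)

// nodeCondition is a local stand-in for the fields shown in the log.
type nodeCondition struct {
	Type               string    `json:"type"`
	Status             string    `json:"status"`
	LastHeartbeatTime  time.Time `json:"lastHeartbeatTime"`
	LastTransitionTime time.Time `json:"lastTransitionTime"`
	Reason             string    `json:"reason"`
	Message            string    `json:"message"`
}

func main() {
	// Payload copied from the 05:21:17.010264 record above.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:17Z","lastTransitionTime":"2025-12-06T05:21:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`
	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s=%s since %s (%s)\n", c.Type, c.Status,
		c.LastTransitionTime.Format(time.RFC3339), c.Reason)
}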
Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.035932 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs"
Dec 06 05:21:17 crc kubenswrapper[4706]: E1206 05:21:17.036200 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed"
Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.113768 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.113848 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.113866 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.113900 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.113920 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:17Z","lastTransitionTime":"2025-12-06T05:21:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.223758 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.223836 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.223858 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.223890 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.223923 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:17Z","lastTransitionTime":"2025-12-06T05:21:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.328123 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.328179 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.328198 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.328227 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.328246 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:17Z","lastTransitionTime":"2025-12-06T05:21:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.432817 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.432900 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.432919 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.432952 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.432972 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:17Z","lastTransitionTime":"2025-12-06T05:21:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.536969 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.537085 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.537132 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.537166 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.537185 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:17Z","lastTransitionTime":"2025-12-06T05:21:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.640973 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.641076 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.641099 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.641126 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.641147 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:17Z","lastTransitionTime":"2025-12-06T05:21:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.745339 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.745422 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.745443 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.745471 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.745491 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:17Z","lastTransitionTime":"2025-12-06T05:21:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.850738 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.851269 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.851487 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.851656 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.851852 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:17Z","lastTransitionTime":"2025-12-06T05:21:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.954819 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.954869 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.954882 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.954900 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:17 crc kubenswrapper[4706]: I1206 05:21:17.954913 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:17Z","lastTransitionTime":"2025-12-06T05:21:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.035928 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.035965 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.035989 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:18 crc kubenswrapper[4706]: E1206 05:21:18.036266 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:18 crc kubenswrapper[4706]: E1206 05:21:18.036422 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:18 crc kubenswrapper[4706]: E1206 05:21:18.037186 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.052719 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.056723 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.056776 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.056788 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.056807 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.056820 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:18Z","lastTransitionTime":"2025-12-06T05:21:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.069998 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.081809 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.096434 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.113130 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.135228 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb05183d-ba2e-489b-a6f1-36a898bdb6c9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441852619f2ec7f88f40ffeaae94403e82554da0fd0bce732f61ec414a5243f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d15849519cedb4b18f33de96a1bfa7f615f304df9215dc973029423c19689eaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f10c0032009486ff4d0e728b718ffe298807cee09d3cb8d39cee8795bc927a6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://865e45db3642879f721e641001b0ca8446a1f03
ae8a51e0fe361028752bb6178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://275f05f6fac1cdcc29b6596af9c5ede5072c3662cf386ce888edb90dfac1241c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a68ddef9ca426d06117a8b2a90019998d113c808c085cd561d28123d3b1d440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a68ddef9ca426d06117a8b2a90019998d113c808c085cd561d28123d3b1d440\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fd6afc9b9e0f1d8381f44207f56f612b868d7b9318208f1ecef860c45788342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6fd6afc9b9e0f1d8381f44207f56f612b868d7b9318208f1ecef860c45788342\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ff3a11d42675f8f6e7b106efd0cc563481d4d9861c0c1060a6e35fd13e439d9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ff3a11d42675f8f6e7b106efd0cc563481d4d9861c0c1060a6e35fd13e439d9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.152975 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029
b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 
secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.158928 4706 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.158987 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.158997 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.159018 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.159034 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:18Z","lastTransitionTime":"2025-12-06T05:21:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.171368 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\
\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.185127 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.200588 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.217816 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.238189 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e33291cd993ce9bf4f530c26ef9173efc5ebe7c14f4d97adf1ca6faecca830a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.252767 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.262160 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.262210 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.262223 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.262242 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.262256 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:18Z","lastTransitionTime":"2025-12-06T05:21:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.266939 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"41d87265-f5cd-4b9b-8452-b8e30f10c51c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ddfcf1ee5cd3d1223607128c54cdf5c250d9467340e5d21f9d2f169c96477d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ef91786e78714e5bc5fa9ecdc7ab24a543024c4a326115423d14225241f3fc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ef91786e78714e5bc5fa9ecdc7ab24a543024c4a326115423d14225241f3fc6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2025-12-06T05:21:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.284109 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://418fbe98ebc093b9ab0d935b32f34d145705cbcde20714629877fcca90111b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac6c557fb022a4bf70551a123d98e7250ed6b425ea31531a32bcccd8bc16a173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: 
failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.298629 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.313613 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dedd7c8354756f4eba54307bbb72a153b9c4b8f01bbd97fce12423fd16f3aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:54Z\\\",\\\"message\\\":\\\"2025-12-06T05:20:09+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_f74b837f-07db-4a6b-a740-30a144ac39f2\\\\n2025-12-06T05:20:09+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_f74b837f-07db-4a6b-a740-30a144ac39f2 to /host/opt/cni/bin/\\\\n2025-12-06T05:20:09Z [verbose] multus-daemon started\\\\n2025-12-06T05:20:09Z [verbose] Readiness Indicator file check\\\\n2025-12-06T05:20:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.338321 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:54Z\\\",\\\"message\\\":\\\"logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nF1206 05:20:54.267115 6454 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z]\\\\nI1206 05:20:54.267107 6454 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, E\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-l5xg7_openshift-ovn-kubernetes(a4bbd5a9-5b78-4e07-b4af-e10d4768de95)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.353298 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0127b51-745e-4842-8bc1-cf7d089d2e0d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c
97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:18Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.365601 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.365774 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.366451 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.366569 4706 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.366657 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:18Z","lastTransitionTime":"2025-12-06T05:21:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.470652 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.470735 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.470749 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.470775 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.470798 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:18Z","lastTransitionTime":"2025-12-06T05:21:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.573735 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.574374 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.574469 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.574566 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.574667 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:18Z","lastTransitionTime":"2025-12-06T05:21:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.682618 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.682722 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.682740 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.682763 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.682786 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:18Z","lastTransitionTime":"2025-12-06T05:21:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.785820 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.785891 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.785909 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.785938 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.785959 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:18Z","lastTransitionTime":"2025-12-06T05:21:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.891622 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.891746 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.891817 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.891932 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.892236 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:18Z","lastTransitionTime":"2025-12-06T05:21:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.995229 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.995287 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.995305 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.995334 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:18 crc kubenswrapper[4706]: I1206 05:21:18.995353 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:18Z","lastTransitionTime":"2025-12-06T05:21:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.035834 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:19 crc kubenswrapper[4706]: E1206 05:21:19.036015 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.099618 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.099685 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.099703 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.099732 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.099751 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:19Z","lastTransitionTime":"2025-12-06T05:21:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.202928 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.202982 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.202995 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.203013 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.203025 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:19Z","lastTransitionTime":"2025-12-06T05:21:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.306317 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.306440 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.306463 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.306494 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.306514 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:19Z","lastTransitionTime":"2025-12-06T05:21:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.409711 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.409804 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.409832 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.409885 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.409921 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:19Z","lastTransitionTime":"2025-12-06T05:21:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.513632 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.513707 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.513726 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.513754 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.513773 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:19Z","lastTransitionTime":"2025-12-06T05:21:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.617304 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.617412 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.617431 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.617496 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.617517 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:19Z","lastTransitionTime":"2025-12-06T05:21:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.722513 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.722570 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.722589 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.722616 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.722635 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:19Z","lastTransitionTime":"2025-12-06T05:21:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.828011 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.828152 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.828218 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.828253 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.828311 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:19Z","lastTransitionTime":"2025-12-06T05:21:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.931588 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.931677 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.931706 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.931746 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:19 crc kubenswrapper[4706]: I1206 05:21:19.931770 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:19Z","lastTransitionTime":"2025-12-06T05:21:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.035297 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.035456 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:20 crc kubenswrapper[4706]: E1206 05:21:20.035503 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.035583 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:20 crc kubenswrapper[4706]: E1206 05:21:20.035744 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:20 crc kubenswrapper[4706]: E1206 05:21:20.035877 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.036089 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.036165 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.036193 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.036222 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.036243 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:20Z","lastTransitionTime":"2025-12-06T05:21:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.139663 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.139986 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.140124 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.140223 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.140287 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:20Z","lastTransitionTime":"2025-12-06T05:21:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.243271 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.243562 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.243630 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.243706 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.243825 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:20Z","lastTransitionTime":"2025-12-06T05:21:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.345896 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.346161 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.346243 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.346339 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.346424 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:20Z","lastTransitionTime":"2025-12-06T05:21:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.449442 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.449800 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.449812 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.449832 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.449846 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:20Z","lastTransitionTime":"2025-12-06T05:21:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.552826 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.552870 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.552879 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.552896 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.552907 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:20Z","lastTransitionTime":"2025-12-06T05:21:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.655563 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.655617 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.655629 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.655677 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.655693 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:20Z","lastTransitionTime":"2025-12-06T05:21:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.758552 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.758626 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.758636 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.758652 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.758663 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:20Z","lastTransitionTime":"2025-12-06T05:21:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.861659 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.861728 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.861746 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.861773 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.861800 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:20Z","lastTransitionTime":"2025-12-06T05:21:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.965291 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.965354 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.965371 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.965397 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:20 crc kubenswrapper[4706]: I1206 05:21:20.965417 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:20Z","lastTransitionTime":"2025-12-06T05:21:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.035967 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:21 crc kubenswrapper[4706]: E1206 05:21:21.036156 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.068856 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.068891 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.068900 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.068916 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.068927 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:21Z","lastTransitionTime":"2025-12-06T05:21:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.172535 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.172586 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.172600 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.172621 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.172637 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:21Z","lastTransitionTime":"2025-12-06T05:21:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.276435 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.276537 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.276563 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.276599 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.276625 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:21Z","lastTransitionTime":"2025-12-06T05:21:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.380662 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.380741 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.380759 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.380788 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.380812 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:21Z","lastTransitionTime":"2025-12-06T05:21:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.485705 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.485789 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.485802 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.485826 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.485842 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:21Z","lastTransitionTime":"2025-12-06T05:21:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.589551 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.589634 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.589648 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.589695 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.589711 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:21Z","lastTransitionTime":"2025-12-06T05:21:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.694221 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.694282 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.694295 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.694320 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.694333 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:21Z","lastTransitionTime":"2025-12-06T05:21:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.797638 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.797738 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.797758 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.797783 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.797802 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:21Z","lastTransitionTime":"2025-12-06T05:21:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.901892 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.902013 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.902038 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.902114 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:21 crc kubenswrapper[4706]: I1206 05:21:21.902135 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:21Z","lastTransitionTime":"2025-12-06T05:21:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.005732 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.005842 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.005873 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.005904 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.005923 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:22Z","lastTransitionTime":"2025-12-06T05:21:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.036231 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.036300 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:22 crc kubenswrapper[4706]: E1206 05:21:22.036491 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.036570 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:22 crc kubenswrapper[4706]: E1206 05:21:22.036749 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:22 crc kubenswrapper[4706]: E1206 05:21:22.036975 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.109718 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.109785 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.109802 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.109826 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.109845 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:22Z","lastTransitionTime":"2025-12-06T05:21:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.214206 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.214277 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.214287 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.214309 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.214354 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:22Z","lastTransitionTime":"2025-12-06T05:21:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.317512 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.317601 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.317662 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.317688 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.317781 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:22Z","lastTransitionTime":"2025-12-06T05:21:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.421094 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.421187 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.421202 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.421222 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.421257 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:22Z","lastTransitionTime":"2025-12-06T05:21:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.525787 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.525865 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.525889 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.525924 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.525947 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:22Z","lastTransitionTime":"2025-12-06T05:21:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.628855 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.628905 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.628917 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.628937 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.628952 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:22Z","lastTransitionTime":"2025-12-06T05:21:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.732490 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.732559 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.732581 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.732605 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.732629 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:22Z","lastTransitionTime":"2025-12-06T05:21:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.835410 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.835459 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.835468 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.835491 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.835502 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:22Z","lastTransitionTime":"2025-12-06T05:21:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.938226 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.938280 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.938296 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.938319 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:22 crc kubenswrapper[4706]: I1206 05:21:22.938336 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:22Z","lastTransitionTime":"2025-12-06T05:21:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.035999 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:23 crc kubenswrapper[4706]: E1206 05:21:23.036272 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.040988 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.041033 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.041099 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.041122 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.041136 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:23Z","lastTransitionTime":"2025-12-06T05:21:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.144274 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.144365 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.144391 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.144434 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.144453 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:23Z","lastTransitionTime":"2025-12-06T05:21:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.247967 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.248027 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.248065 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.248087 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.248098 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:23Z","lastTransitionTime":"2025-12-06T05:21:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.297526 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.297601 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.297617 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.297646 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.297666 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:23Z","lastTransitionTime":"2025-12-06T05:21:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:23 crc kubenswrapper[4706]: E1206 05:21:23.326852 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:23Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.332643 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.332711 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.332724 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.332746 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.332759 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:23Z","lastTransitionTime":"2025-12-06T05:21:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:23 crc kubenswrapper[4706]: E1206 05:21:23.351578 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:23Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.357143 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.357204 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.357221 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.357244 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.357260 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:23Z","lastTransitionTime":"2025-12-06T05:21:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:23 crc kubenswrapper[4706]: E1206 05:21:23.380465 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:23Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.385802 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.385859 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.385871 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.385891 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.385904 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:23Z","lastTransitionTime":"2025-12-06T05:21:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:23 crc kubenswrapper[4706]: E1206 05:21:23.409104 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:23Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.414571 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.414636 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.414652 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.414684 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.414707 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:23Z","lastTransitionTime":"2025-12-06T05:21:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:23 crc kubenswrapper[4706]: E1206 05:21:23.433703 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-12-06T05:21:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"8a86399a-ffbc-43cf-804f-ca5cf554f1d4\\\",\\\"systemUUID\\\":\\\"228676c3-f175-4087-a116-e5c2da56f712\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:23Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:23 crc kubenswrapper[4706]: E1206 05:21:23.433816 4706 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.435641 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.435677 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.435688 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.435704 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.435714 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:23Z","lastTransitionTime":"2025-12-06T05:21:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.537788 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.537839 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.537852 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.537871 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.537884 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:23Z","lastTransitionTime":"2025-12-06T05:21:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.640575 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.640625 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.640642 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.640658 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.640668 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:23Z","lastTransitionTime":"2025-12-06T05:21:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.742761 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.742797 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.742806 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.742822 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.742835 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:23Z","lastTransitionTime":"2025-12-06T05:21:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.845974 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.846011 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.846023 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.846041 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.846074 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:23Z","lastTransitionTime":"2025-12-06T05:21:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.948554 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.948605 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.948614 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.948629 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:23 crc kubenswrapper[4706]: I1206 05:21:23.948637 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:23Z","lastTransitionTime":"2025-12-06T05:21:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.035143 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.035229 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:24 crc kubenswrapper[4706]: E1206 05:21:24.035286 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.035325 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:24 crc kubenswrapper[4706]: E1206 05:21:24.035376 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:24 crc kubenswrapper[4706]: E1206 05:21:24.035404 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.036086 4706 scope.go:117] "RemoveContainer" containerID="771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.051071 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.051111 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.051124 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.051143 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.051159 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:24Z","lastTransitionTime":"2025-12-06T05:21:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.154292 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.154340 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.154351 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.154369 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.154384 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:24Z","lastTransitionTime":"2025-12-06T05:21:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.257304 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.257392 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.257414 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.257446 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.257466 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:24Z","lastTransitionTime":"2025-12-06T05:21:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.361466 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.361525 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.361537 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.361557 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.361571 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:24Z","lastTransitionTime":"2025-12-06T05:21:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.464911 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.464988 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.465010 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.465087 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.465114 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:24Z","lastTransitionTime":"2025-12-06T05:21:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.570097 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.570170 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.570190 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.570220 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.570249 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:24Z","lastTransitionTime":"2025-12-06T05:21:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.673895 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.673942 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.673959 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.673988 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.674008 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:24Z","lastTransitionTime":"2025-12-06T05:21:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.777900 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.777984 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.778000 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.778235 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.778257 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:24Z","lastTransitionTime":"2025-12-06T05:21:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.881830 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.881902 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.881920 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.881955 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.881977 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:24Z","lastTransitionTime":"2025-12-06T05:21:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.985252 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.985314 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.985332 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.985358 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:24 crc kubenswrapper[4706]: I1206 05:21:24.985377 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:24Z","lastTransitionTime":"2025-12-06T05:21:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.036143 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs"
Dec 06 05:21:25 crc kubenswrapper[4706]: E1206 05:21:25.036562 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.089034 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.089105 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.089116 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.089132 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.089142 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:25Z","lastTransitionTime":"2025-12-06T05:21:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.191490 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.191532 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.191545 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.191563 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.191575 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:25Z","lastTransitionTime":"2025-12-06T05:21:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.279715 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs\") pod \"network-metrics-daemon-4ltjs\" (UID: \"f4065785-c72e-4c45-ab51-ce292be4f2ed\") " pod="openshift-multus/network-metrics-daemon-4ltjs"
Dec 06 05:21:25 crc kubenswrapper[4706]: E1206 05:21:25.279937 4706 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Dec 06 05:21:25 crc kubenswrapper[4706]: E1206 05:21:25.280098 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs podName:f4065785-c72e-4c45-ab51-ce292be4f2ed nodeName:}" failed. No retries permitted until 2025-12-06 05:22:29.280038362 +0000 UTC m=+171.607862486 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs") pod "network-metrics-daemon-4ltjs" (UID: "f4065785-c72e-4c45-ab51-ce292be4f2ed") : object "openshift-multus"/"metrics-daemon-secret" not registered
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.294592 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.294660 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.294672 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.294692 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.294705 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:25Z","lastTransitionTime":"2025-12-06T05:21:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.397688 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.397770 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.397788 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.397819 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.397837 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:25Z","lastTransitionTime":"2025-12-06T05:21:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.501115 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.501201 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.501231 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.501266 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.501293 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:25Z","lastTransitionTime":"2025-12-06T05:21:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.604972 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.605022 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.605036 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.605078 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.605091 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:25Z","lastTransitionTime":"2025-12-06T05:21:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.707911 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.707974 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.707987 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.708008 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.708022 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:25Z","lastTransitionTime":"2025-12-06T05:21:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.749726 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovnkube-controller/2.log"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.753419 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerStarted","Data":"5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2"}
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.754085 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7"
Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.779722 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb05183d-ba2e-489b-a6f1-36a898bdb6c9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441852619f2ec7f88f40ffeaae94403e82554da0fd0bce732f61ec414a5243f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d15849519cedb4b18f33de96a1bfa7f615f304df9215dc973029423c19689eaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f10c0032009486ff4d0e728b718ffe298807cee09d3cb8d39cee8795bc927a6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://865e45db3642879f721e641001b0ca8446a1f03
ae8a51e0fe361028752bb6178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://275f05f6fac1cdcc29b6596af9c5ede5072c3662cf386ce888edb90dfac1241c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a68ddef9ca426d06117a8b2a90019998d113c808c085cd561d28123d3b1d440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a68ddef9ca426d06117a8b2a90019998d113c808c085cd561d28123d3b1d440\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fd6afc9b9e0f1d8381f44207f56f612b868d7b9318208f1ecef860c45788342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6fd6afc9b9e0f1d8381f44207f56f612b868d7b9318208f1ecef860c45788342\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ff3a11d42675f8f6e7b106efd0cc563481d4d9861c0c1060a6e35fd13e439d9\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ff3a11d42675f8f6e7b106efd0cc563481d4d9861c0c1060a6e35fd13e439d9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:25Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.800581 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:25Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.810393 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.810440 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.810454 4706 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.810476 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.810488 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:25Z","lastTransitionTime":"2025-12-06T05:21:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.819073 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:25Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.831237 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:25Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.842484 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:25Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.856533 4706 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:25Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.867203 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:25Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.877986 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"41d87265-f5cd-4b9b-8452-b8e30f10c51c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ddfcf1ee5cd3d1223607128c54cdf5c250d9467340e5d21f9d2f169c96477d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ef91786e78714e5bc5fa9ecdc7ab24a543024c4a326115423d14225241f3fc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ef91786e78714e5bc5fa9ecdc7ab24a543024c4a326115423d14225241f3fc6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:25Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.898550 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:25Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.913536 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.913564 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.913578 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.913596 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.913610 4706 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:25Z","lastTransitionTime":"2025-12-06T05:21:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.913710 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:25Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.927173 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:25Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.946839 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:25Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.963227 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:25Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:25 crc kubenswrapper[4706]: I1206 05:21:25.987740 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e33291cd993ce9bf4f530c26ef9173efc5ebe7c14f4d97adf1ca6faecca830a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5
db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mo
untPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:25Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.004844 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://418fbe98ebc093b9ab0d935b32f34d145705cbcde20714629877fcca90111b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac6c557fb022a4bf70551a123d98e7250ed6b425ea31531a32bcccd8bc16a173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\"
:true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:26Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.017406 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.017497 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.017521 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.017558 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.017585 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:26Z","lastTransitionTime":"2025-12-06T05:21:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.021305 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0127b51-745e-4842-8bc1-cf7d089d2e0d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:26Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.036139 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.036199 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:26 crc kubenswrapper[4706]: E1206 05:21:26.036319 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.036609 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:26 crc kubenswrapper[4706]: E1206 05:21:26.036602 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:26 crc kubenswrapper[4706]: E1206 05:21:26.037156 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.042023 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:26Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.065642 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dedd7c8354756f4eba54307bbb72a153b9c4b8f01bbd97fce12423fd16f3aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:54Z\\\",\\\"message\\\":\\\"2025-12-06T05:20:09+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_f74b837f-07db-4a6b-a740-30a144ac39f2\\\\n2025-12-06T05:20:09+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_f74b837f-07db-4a6b-a740-30a144ac39f2 to /host/opt/cni/bin/\\\\n2025-12-06T05:20:09Z [verbose] multus-daemon started\\\\n2025-12-06T05:20:09Z [verbose] Readiness Indicator file check\\\\n2025-12-06T05:20:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:26Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.092818 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:54Z\\\",\\\"message\\\":\\\"logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nF1206 05:20:54.267115 6454 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z]\\\\nI1206 05:20:54.267107 6454 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, 
E\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:21:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:26Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.121197 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.121283 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.121306 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.121378 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.121405 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:26Z","lastTransitionTime":"2025-12-06T05:21:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.225004 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.225122 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.225149 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.225185 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.225210 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:26Z","lastTransitionTime":"2025-12-06T05:21:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.328869 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.328947 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.328965 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.329032 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.329078 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:26Z","lastTransitionTime":"2025-12-06T05:21:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.433681 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.433745 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.433767 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.433796 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.433821 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:26Z","lastTransitionTime":"2025-12-06T05:21:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.537245 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.537360 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.537389 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.537418 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.537440 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:26Z","lastTransitionTime":"2025-12-06T05:21:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.642197 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.642278 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.642328 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.642364 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.642388 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:26Z","lastTransitionTime":"2025-12-06T05:21:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.746545 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.746665 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.746685 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.746717 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.746737 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:26Z","lastTransitionTime":"2025-12-06T05:21:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.850216 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.850300 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.850321 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.850353 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.850375 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:26Z","lastTransitionTime":"2025-12-06T05:21:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.953809 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.953930 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.953962 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.954032 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:26 crc kubenswrapper[4706]: I1206 05:21:26.954113 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:26Z","lastTransitionTime":"2025-12-06T05:21:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.036236 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:27 crc kubenswrapper[4706]: E1206 05:21:27.036537 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.058815 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.058878 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.058896 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.058923 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.058944 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:27Z","lastTransitionTime":"2025-12-06T05:21:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.162129 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.162209 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.162230 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.162261 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.162282 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:27Z","lastTransitionTime":"2025-12-06T05:21:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.265732 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.265807 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.265829 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.265869 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.265890 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:27Z","lastTransitionTime":"2025-12-06T05:21:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.369334 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.369402 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.369428 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.369454 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.369476 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:27Z","lastTransitionTime":"2025-12-06T05:21:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.473333 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.473427 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.473446 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.473477 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.473498 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:27Z","lastTransitionTime":"2025-12-06T05:21:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.577112 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.577188 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.577215 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.577248 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.577275 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:27Z","lastTransitionTime":"2025-12-06T05:21:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.680873 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.680942 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.680959 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.680988 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.681006 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:27Z","lastTransitionTime":"2025-12-06T05:21:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.784634 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.784707 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.784721 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.784744 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.784760 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:27Z","lastTransitionTime":"2025-12-06T05:21:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.888585 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.888659 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.888669 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.888689 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.888702 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:27Z","lastTransitionTime":"2025-12-06T05:21:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.993007 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.993195 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.993226 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.993263 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:27 crc kubenswrapper[4706]: I1206 05:21:27.993291 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:27Z","lastTransitionTime":"2025-12-06T05:21:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.035699 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.035716 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:28 crc kubenswrapper[4706]: E1206 05:21:28.036009 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.036135 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:28 crc kubenswrapper[4706]: E1206 05:21:28.036217 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:28 crc kubenswrapper[4706]: E1206 05:21:28.036346 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.063969 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f0d98ffc627d73c53079af9b7c3a9a80645b01444cdb65ff4a0ab376fc2161a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.081561 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-zct8k" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08344ca0-306d-4ff1-81eb-cb9d32a4230a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://187b8977e4d04e7e8286b2d9d20195c160b39c10d38ded77630fa11790934da9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xg7vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-zct8k\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.098002 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.098116 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.098145 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.098183 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.098207 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:28Z","lastTransitionTime":"2025-12-06T05:21:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.105916 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ae6d3c62-ad40-492b-9c35-d0043649cb81\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://39b9246d2bcffb43e90c3f9b4953ffce47215e9ed379fc024585ee4f9d89b054\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fwxq8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-z27rn\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.125255 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4065785-c72e-4c45-ab51-ce292be4f2ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-vcf4l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:21Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-4ltjs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:28 crc 
kubenswrapper[4706]: I1206 05:21:28.164001 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bb05183d-ba2e-489b-a6f1-36a898bdb6c9\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441852619f2ec7f88f40ffeaae94403e82554da0fd0bce732f61ec414a5243f5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d15849519cedb4b18f33de96a1bfa7f615f304df9215dc973029423c19689eaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f10c0032009486ff4d0e728b718ffe298807cee09d3cb8d39cee8795bc927a6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\
\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://865e45db3642879f721e641001b0ca8446a1f03ae8a51e0fe361028752bb6178\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://275f05f6fac1cdcc29b6596af9c5ede5072c3662cf386ce888edb90dfac1241c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a68ddef9ca426d06117a8b2a90019998d113c808c085cd561d28123d3b1d440\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6a68ddef9ca426d06117a8b2a90019998d113c808c085cd561d28123d3b1d440\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6fd6afc9b9e0f1d8381f44207f56f612b868d7b9318208f1ecef860c45788342\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6fd6afc9b9e0f1d8381f44207f56f612b868d7b9318208f1ecef860c45788342\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Co
mpleted\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://3ff3a11d42675f8f6e7b106efd0cc563481d4d9861c0c1060a6e35fd13e439d9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ff3a11d42675f8f6e7b106efd0cc563481d4d9861c0c1060a6e35fd13e439d9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.193534 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7535739c-4128-4ffd-85d6-c29c78af64b2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8efc126efc82454c43b5c37e17a92c9d0305333733c6fc9fa47d4c9743b98cf3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a57257486de7a397dc3d4ddf5c1888a249029d3dc7d2d395ca835ea102251a7f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://03244aa8e6247e5038bdb1e3ca92500e70bb50262785862a0f0feb90dc52ccc6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.202495 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.202613 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.202635 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.202706 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.202726 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:28Z","lastTransitionTime":"2025-12-06T05:21:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.220016 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://adc4a985105ee8656ebc380ca0c1fb2807e4db45dfd93e03c5f2bec7454ba9ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5c824cac39c6906c811e76d7eedd2dab0b6b32c801e75c369bd134223a4493ac\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.241575 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://912ab91cb91804ca61480d8da83c7d03404b78ac3073ff3a6c00cfda6de8df89\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.263759 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.286723 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.307138 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.307209 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.307227 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.307258 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.307280 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:28Z","lastTransitionTime":"2025-12-06T05:21:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.314468 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"87424bac-c58b-4fae-8f44-443e202bf113\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e33291cd993ce9bf4f530c26ef9173efc5ebe7c14f4d97adf1ca6faecca830a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34d4dacb93aae72a884a929f508d533c7a34b29fad2cd4882d737dbac69d2981\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://45c3559066bf172bda169030d394f636130bedb39238f41aa5b696c67cb04a3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://492f3432f7012943de8da3ccdd0194a83b63a6ca536221c890b17038691170bf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d032d5449e467c3173e993d5fb822a7a4c09474edf3294dc4910283e28a982d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cb4d67528247d9d49b8330b491e9c835289aec67aa42bdb114e85bea5f102135\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:21Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5f750492949011859fe4ec7f3f917384d67a386c92463dfd5c18e57c6d76362\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lqtks\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gv2xq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.333928 4706 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-mtbkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4afb28f7-2b2c-4ca3-bf32-30f314fa6d13\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://645c1d4ebe72d07cfc6743ead6641c33d6460047df25a0567c34ae5a8a6e0721\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-886hm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:08Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-mtbkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.352725 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"41d87265-f5cd-4b9b-8452-b8e30f10c51c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2ddfcf1ee5cd3d1223607128c54cdf5c250d9467340e5d21f9d2f169c96477d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ef91786e78714e5bc5fa9ecdc7ab24a543024c4a326115423d14225241f3fc6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ef91786e78714e5bc5fa9ecdc7ab24a543024c4a326115423d14225241f3fc6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.377791 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"15594eef-1c46-43e2-9910-088593c720de\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"file observer\\\\nW1206 05:20:04.898592 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1206 05:20:04.898723 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1206 05:20:04.899478 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-849439676/tls.crt::/tmp/serving-cert-849439676/tls.key\\\\\\\"\\\\nI1206 05:20:05.273905 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1206 05:20:05.276515 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1206 05:20:05.276537 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1206 05:20:05.276565 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1206 05:20:05.276575 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1206 05:20:05.289407 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1206 05:20:05.289437 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1206 05:20:05.289456 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289468 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1206 05:20:05.289477 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1206 05:20:05.289484 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1206 05:20:05.289490 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1206 05:20:05.289497 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1206 05:20:05.291652 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:49Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.398015 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"95f64f73-47d5-4156-8eb5-539fa23b4202\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://418fbe98ebc093b9ab0d935b32f34d145705cbcde20714629877fcca90111b22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac6c557fb022a4bf70551a123d98e7250ed6b425ea31531a32bcccd8bc16a173\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qcczw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-4k52q\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:28Z is after 2025-08-24T17:21:41Z" Dec 06 
05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.410225 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.410303 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.410313 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.410350 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.410365 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:28Z","lastTransitionTime":"2025-12-06T05:21:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.420678 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-rtxrp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3dedd7c8354756f4eba54307bbb72a153b9c4b8f01bbd97fce12423fd16f3aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:54Z\\\",\\\"message\\\":\\\"2025-12-06T05:20:09+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_f74b837f-07db-4a6b-a740-30a144ac39f2\\\\n2025-12-06T05:20:09+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_f74b837f-07db-4a6b-a740-30a144ac39f2 to /host/opt/cni/bin/\\\\n2025-12-06T05:20:09Z [verbose] multus-daemon started\\\\n2025-12-06T05:20:09Z [verbose] Readiness Indicator file check\\\\n2025-12-06T05:20:54Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hnhxc\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:06Z\\\"}}\" for pod \"openshift-multus\"/\"multus-rtxrp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.458571 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-12-06T05:20:54Z\\\",\\\"message\\\":\\\"logical port openshift-network-diagnostics_network-check-target-xd92c for pod on switch crc\\\\nF1206 05:20:54.267115 6454 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:20:54Z is after 2025-08-24T17:21:41Z]\\\\nI1206 05:20:54.267107 6454 services_controller.go:451] Built service openshift-marketplace/redhat-operators cluster-wide LB for network=default: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-marketplace/redhat-operators_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-marketplace/redhat-operators\\\\\\\"}, Opts:services.LBOpts{Reject:true, 
E\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:53Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:21:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:20:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:20:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:20:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-7xwsz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:20:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-l5xg7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.482304 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c0127b51-745e-4842-8bc1-cf7d089d2e0d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-12-06T05:19:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b8fb840d935c74c1c2140b64a8ef515f18c0a6d8b42241a7e60395b9953b26af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7fa0a32b30295d13bc85e1990acbd75c714e91d2ab91c482335f607640f708c3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://bf4d10484bd3ad0243c71b33573bb8b47f177a9f4b2d13a227416f5f7d0d774d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-12-06T05:19:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc863340f478daba6fb7638ca9cf3f27b4cd0eb66a9f8e115dbf3d8b6a1c684a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-12-06T05:19:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-12-06T05:19:45Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-12-06T05:19:44Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.505775 4706 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-12-06T05:20:06Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-12-06T05:21:28Z is after 2025-08-24T17:21:41Z" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.513799 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.513901 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.513935 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.513976 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.513997 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:28Z","lastTransitionTime":"2025-12-06T05:21:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.617501 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.617576 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.617598 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.617633 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.617653 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:28Z","lastTransitionTime":"2025-12-06T05:21:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.721500 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.721581 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.721602 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.721637 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.721659 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:28Z","lastTransitionTime":"2025-12-06T05:21:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.825396 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.825538 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.825562 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.825589 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.825608 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:28Z","lastTransitionTime":"2025-12-06T05:21:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.929865 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.929972 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.929991 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.930017 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:28 crc kubenswrapper[4706]: I1206 05:21:28.930040 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:28Z","lastTransitionTime":"2025-12-06T05:21:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.033608 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.033680 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.033707 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.033747 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.033772 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:29Z","lastTransitionTime":"2025-12-06T05:21:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.035868 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:29 crc kubenswrapper[4706]: E1206 05:21:29.036316 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.137142 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.137256 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.137278 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.137306 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.137327 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:29Z","lastTransitionTime":"2025-12-06T05:21:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.240557 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.240649 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.240681 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.240717 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.240738 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:29Z","lastTransitionTime":"2025-12-06T05:21:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.344669 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.344746 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.344770 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.344804 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.344825 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:29Z","lastTransitionTime":"2025-12-06T05:21:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.450177 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.450254 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.450279 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.450311 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.450334 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:29Z","lastTransitionTime":"2025-12-06T05:21:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.553664 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.553779 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.553815 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.553853 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.553879 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:29Z","lastTransitionTime":"2025-12-06T05:21:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.657266 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.657314 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.657325 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.657343 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.657355 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:29Z","lastTransitionTime":"2025-12-06T05:21:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.759872 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.759926 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.759937 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.759954 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.759967 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:29Z","lastTransitionTime":"2025-12-06T05:21:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.772903 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovnkube-controller/3.log" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.773505 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovnkube-controller/2.log" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.776740 4706 generic.go:334] "Generic (PLEG): container finished" podID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerID="5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2" exitCode=1 Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.776796 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerDied","Data":"5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2"} Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.776857 4706 scope.go:117] "RemoveContainer" containerID="771a9b6a757e09d16b2b1f32898b1e54d7c428e2ae68e537a4d278914a85698f" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.778238 4706 scope.go:117] "RemoveContainer" containerID="5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2" Dec 06 05:21:29 crc kubenswrapper[4706]: E1206 05:21:29.778451 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-l5xg7_openshift-ovn-kubernetes(a4bbd5a9-5b78-4e07-b4af-e10d4768de95)\"" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.862961 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.863018 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.863030 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.863077 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.863093 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:29Z","lastTransitionTime":"2025-12-06T05:21:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.966902 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.966971 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.966983 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.966996 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:29 crc kubenswrapper[4706]: I1206 05:21:29.967006 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:29Z","lastTransitionTime":"2025-12-06T05:21:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.000860 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-rtxrp" podStartSLOduration=84.000825851 podStartE2EDuration="1m24.000825851s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:21:29.966644475 +0000 UTC m=+112.294468439" watchObservedRunningTime="2025-12-06 05:21:30.000825851 +0000 UTC m=+112.328649805" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.021133 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=58.021101594 podStartE2EDuration="58.021101594s" podCreationTimestamp="2025-12-06 05:20:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:21:30.020723954 +0000 UTC m=+112.348547918" watchObservedRunningTime="2025-12-06 05:21:30.021101594 +0000 UTC m=+112.348925558" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.035592 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.035666 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.035716 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:30 crc kubenswrapper[4706]: E1206 05:21:30.035834 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:30 crc kubenswrapper[4706]: E1206 05:21:30.036314 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:30 crc kubenswrapper[4706]: E1206 05:21:30.036627 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.070880 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.070959 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.070978 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.071012 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.071030 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:30Z","lastTransitionTime":"2025-12-06T05:21:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.087669 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-zct8k" podStartSLOduration=86.087633894 podStartE2EDuration="1m26.087633894s" podCreationTimestamp="2025-12-06 05:20:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:21:30.087187413 +0000 UTC m=+112.415011417" watchObservedRunningTime="2025-12-06 05:21:30.087633894 +0000 UTC m=+112.415457868" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.103679 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podStartSLOduration=85.103652714 podStartE2EDuration="1m25.103652714s" podCreationTimestamp="2025-12-06 05:20:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:21:30.102792931 +0000 UTC m=+112.430616895" watchObservedRunningTime="2025-12-06 05:21:30.103652714 +0000 UTC m=+112.431476658" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.173799 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.173845 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.173856 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.173875 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.173888 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:30Z","lastTransitionTime":"2025-12-06T05:21:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.183988 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=23.183952034 podStartE2EDuration="23.183952034s" podCreationTimestamp="2025-12-06 05:21:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:21:30.1587949 +0000 UTC m=+112.486618864" watchObservedRunningTime="2025-12-06 05:21:30.183952034 +0000 UTC m=+112.511775978" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.210543 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=84.210516315 podStartE2EDuration="1m24.210516315s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:21:30.185482374 +0000 UTC m=+112.513306318" watchObservedRunningTime="2025-12-06 05:21:30.210516315 +0000 UTC m=+112.538340269" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.276197 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.276256 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.276287 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.276307 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.276319 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:30Z","lastTransitionTime":"2025-12-06T05:21:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.290741 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-gv2xq" podStartSLOduration=84.290710662 podStartE2EDuration="1m24.290710662s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:21:30.289950822 +0000 UTC m=+112.617774786" watchObservedRunningTime="2025-12-06 05:21:30.290710662 +0000 UTC m=+112.618534646" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.306258 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-mtbkm" podStartSLOduration=85.306235858 podStartE2EDuration="1m25.306235858s" podCreationTimestamp="2025-12-06 05:20:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:21:30.306107824 +0000 UTC m=+112.633931768" watchObservedRunningTime="2025-12-06 05:21:30.306235858 +0000 UTC m=+112.634059802" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.343306 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=20.34327719 podStartE2EDuration="20.34327719s" podCreationTimestamp="2025-12-06 05:21:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:21:30.320443549 +0000 UTC m=+112.648267513" watchObservedRunningTime="2025-12-06 05:21:30.34327719 +0000 UTC m=+112.671101144" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.359657 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=84.359629358 podStartE2EDuration="1m24.359629358s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:21:30.343561648 +0000 UTC m=+112.671385612" watchObservedRunningTime="2025-12-06 05:21:30.359629358 +0000 UTC m=+112.687453302" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.359994 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-4k52q" podStartSLOduration=84.359990878 podStartE2EDuration="1m24.359990878s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:21:30.359258247 +0000 UTC m=+112.687082201" watchObservedRunningTime="2025-12-06 05:21:30.359990878 +0000 UTC m=+112.687814822" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.379670 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.379710 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.379722 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.379739 4706 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.379753 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:30Z","lastTransitionTime":"2025-12-06T05:21:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.483744 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.483813 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.483832 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.483862 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.483881 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:30Z","lastTransitionTime":"2025-12-06T05:21:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.587323 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.587408 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.587431 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.587463 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.587486 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:30Z","lastTransitionTime":"2025-12-06T05:21:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.691230 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.691314 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.691339 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.691374 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.691396 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:30Z","lastTransitionTime":"2025-12-06T05:21:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.783801 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovnkube-controller/3.log" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.793344 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.793396 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.793420 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.793452 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.793476 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:30Z","lastTransitionTime":"2025-12-06T05:21:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.896578 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.897365 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.897427 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.897475 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:30 crc kubenswrapper[4706]: I1206 05:21:30.897499 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:30Z","lastTransitionTime":"2025-12-06T05:21:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.000834 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.000900 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.000910 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.000930 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.000941 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:31Z","lastTransitionTime":"2025-12-06T05:21:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.035955 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:31 crc kubenswrapper[4706]: E1206 05:21:31.036207 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.104624 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.104673 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.104687 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.104706 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.104719 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:31Z","lastTransitionTime":"2025-12-06T05:21:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.208326 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.208425 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.208445 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.208475 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.208497 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:31Z","lastTransitionTime":"2025-12-06T05:21:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.312416 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.312482 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.312505 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.312535 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.312557 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:31Z","lastTransitionTime":"2025-12-06T05:21:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.418956 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.419116 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.419139 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.419173 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.419195 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:31Z","lastTransitionTime":"2025-12-06T05:21:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.523008 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.523114 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.523142 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.523176 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.523201 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:31Z","lastTransitionTime":"2025-12-06T05:21:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.626124 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.626202 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.626220 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.626250 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.626270 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:31Z","lastTransitionTime":"2025-12-06T05:21:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.730299 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.730355 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.730374 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.730402 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.730421 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:31Z","lastTransitionTime":"2025-12-06T05:21:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.833329 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.833395 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.833418 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.833443 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.833459 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:31Z","lastTransitionTime":"2025-12-06T05:21:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.936868 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.936928 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.936946 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.936976 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:31 crc kubenswrapper[4706]: I1206 05:21:31.937000 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:31Z","lastTransitionTime":"2025-12-06T05:21:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.036179 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.037312 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.037444 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.043739 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:32 crc kubenswrapper[4706]: E1206 05:21:32.043750 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.043812 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.044042 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.044150 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.044185 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:32Z","lastTransitionTime":"2025-12-06T05:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:32 crc kubenswrapper[4706]: E1206 05:21:32.044253 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:32 crc kubenswrapper[4706]: E1206 05:21:32.044419 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.148434 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.148494 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.148511 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.148537 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.148557 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:32Z","lastTransitionTime":"2025-12-06T05:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.253549 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.253628 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.253648 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.253680 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.253702 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:32Z","lastTransitionTime":"2025-12-06T05:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.356789 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.356878 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.356901 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.356926 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.356945 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:32Z","lastTransitionTime":"2025-12-06T05:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.460316 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.460397 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.460421 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.460456 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.460483 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:32Z","lastTransitionTime":"2025-12-06T05:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.565366 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.565480 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.565973 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.566042 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.566276 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:32Z","lastTransitionTime":"2025-12-06T05:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.669240 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.669310 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.669329 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.669360 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.669380 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:32Z","lastTransitionTime":"2025-12-06T05:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.772550 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.772614 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.772635 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.772663 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.772683 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:32Z","lastTransitionTime":"2025-12-06T05:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.876157 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.876240 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.876262 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.876299 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.876325 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:32Z","lastTransitionTime":"2025-12-06T05:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.983227 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.983276 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.983290 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.983313 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:32 crc kubenswrapper[4706]: I1206 05:21:32.983330 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:32Z","lastTransitionTime":"2025-12-06T05:21:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.035601 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:33 crc kubenswrapper[4706]: E1206 05:21:33.035786 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.086646 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.086692 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.086705 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.086727 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.086744 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:33Z","lastTransitionTime":"2025-12-06T05:21:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.191436 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.191537 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.191572 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.191606 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.191629 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:33Z","lastTransitionTime":"2025-12-06T05:21:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.297140 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.297264 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.297338 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.297377 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.297397 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:33Z","lastTransitionTime":"2025-12-06T05:21:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.401635 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.401725 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.401746 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.401777 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.401797 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:33Z","lastTransitionTime":"2025-12-06T05:21:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.506213 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.506288 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.506308 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.506339 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.506362 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:33Z","lastTransitionTime":"2025-12-06T05:21:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.610249 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.610326 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.610346 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.610374 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.610395 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:33Z","lastTransitionTime":"2025-12-06T05:21:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.689655 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.689741 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.689760 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.689792 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.689823 4706 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-12-06T05:21:33Z","lastTransitionTime":"2025-12-06T05:21:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.760225 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7"] Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.760809 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.765173 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.765268 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.765273 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.765444 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.777763 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/381bbfa5-d4e4-4864-ac55-d1c25b50acc0-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-5jfg7\" (UID: \"381bbfa5-d4e4-4864-ac55-d1c25b50acc0\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.777926 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/381bbfa5-d4e4-4864-ac55-d1c25b50acc0-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-5jfg7\" (UID: \"381bbfa5-d4e4-4864-ac55-d1c25b50acc0\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.778357 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/381bbfa5-d4e4-4864-ac55-d1c25b50acc0-service-ca\") pod \"cluster-version-operator-5c965bbfc6-5jfg7\" (UID: \"381bbfa5-d4e4-4864-ac55-d1c25b50acc0\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.778460 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/381bbfa5-d4e4-4864-ac55-d1c25b50acc0-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-5jfg7\" (UID: \"381bbfa5-d4e4-4864-ac55-d1c25b50acc0\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.778671 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/381bbfa5-d4e4-4864-ac55-d1c25b50acc0-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-5jfg7\" (UID: \"381bbfa5-d4e4-4864-ac55-d1c25b50acc0\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.879872 4706 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/381bbfa5-d4e4-4864-ac55-d1c25b50acc0-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-5jfg7\" (UID: \"381bbfa5-d4e4-4864-ac55-d1c25b50acc0\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.879951 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/381bbfa5-d4e4-4864-ac55-d1c25b50acc0-service-ca\") pod \"cluster-version-operator-5c965bbfc6-5jfg7\" (UID: \"381bbfa5-d4e4-4864-ac55-d1c25b50acc0\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.879980 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/381bbfa5-d4e4-4864-ac55-d1c25b50acc0-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-5jfg7\" (UID: \"381bbfa5-d4e4-4864-ac55-d1c25b50acc0\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.880009 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/381bbfa5-d4e4-4864-ac55-d1c25b50acc0-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-5jfg7\" (UID: \"381bbfa5-d4e4-4864-ac55-d1c25b50acc0\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.880037 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/381bbfa5-d4e4-4864-ac55-d1c25b50acc0-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-5jfg7\" (UID: \"381bbfa5-d4e4-4864-ac55-d1c25b50acc0\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.880498 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/381bbfa5-d4e4-4864-ac55-d1c25b50acc0-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-5jfg7\" (UID: \"381bbfa5-d4e4-4864-ac55-d1c25b50acc0\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.880641 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/381bbfa5-d4e4-4864-ac55-d1c25b50acc0-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-5jfg7\" (UID: \"381bbfa5-d4e4-4864-ac55-d1c25b50acc0\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.882866 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/381bbfa5-d4e4-4864-ac55-d1c25b50acc0-service-ca\") pod \"cluster-version-operator-5c965bbfc6-5jfg7\" (UID: \"381bbfa5-d4e4-4864-ac55-d1c25b50acc0\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.892614 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/381bbfa5-d4e4-4864-ac55-d1c25b50acc0-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-5jfg7\" (UID: \"381bbfa5-d4e4-4864-ac55-d1c25b50acc0\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" Dec 06 05:21:33 crc kubenswrapper[4706]: I1206 05:21:33.907457 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/381bbfa5-d4e4-4864-ac55-d1c25b50acc0-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-5jfg7\" (UID: \"381bbfa5-d4e4-4864-ac55-d1c25b50acc0\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" Dec 06 05:21:34 crc kubenswrapper[4706]: I1206 05:21:34.036180 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:34 crc kubenswrapper[4706]: I1206 05:21:34.036186 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:34 crc kubenswrapper[4706]: E1206 05:21:34.036380 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:34 crc kubenswrapper[4706]: I1206 05:21:34.036690 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:34 crc kubenswrapper[4706]: E1206 05:21:34.036740 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:34 crc kubenswrapper[4706]: E1206 05:21:34.036899 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:34 crc kubenswrapper[4706]: I1206 05:21:34.093232 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" Dec 06 05:21:34 crc kubenswrapper[4706]: I1206 05:21:34.804754 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" event={"ID":"381bbfa5-d4e4-4864-ac55-d1c25b50acc0","Type":"ContainerStarted","Data":"50188ff6591ffc8077d0e739861553c0ad988bd79b427bfda819464a81f4910f"} Dec 06 05:21:35 crc kubenswrapper[4706]: I1206 05:21:35.035357 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:35 crc kubenswrapper[4706]: E1206 05:21:35.035561 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:35 crc kubenswrapper[4706]: I1206 05:21:35.810290 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" event={"ID":"381bbfa5-d4e4-4864-ac55-d1c25b50acc0","Type":"ContainerStarted","Data":"18417b761b4de1ac7e615b1e7725b755b2ae2b685fb07ed9598cbeec148f7508"} Dec 06 05:21:36 crc kubenswrapper[4706]: I1206 05:21:36.035288 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:36 crc kubenswrapper[4706]: I1206 05:21:36.035342 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:36 crc kubenswrapper[4706]: E1206 05:21:36.035487 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:36 crc kubenswrapper[4706]: I1206 05:21:36.035619 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:36 crc kubenswrapper[4706]: E1206 05:21:36.036770 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:36 crc kubenswrapper[4706]: E1206 05:21:36.036969 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:37 crc kubenswrapper[4706]: I1206 05:21:37.036172 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:37 crc kubenswrapper[4706]: E1206 05:21:37.036427 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:37 crc kubenswrapper[4706]: I1206 05:21:37.852008 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-5jfg7" podStartSLOduration=91.851974234 podStartE2EDuration="1m31.851974234s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:21:37.843346943 +0000 UTC m=+120.171170977" watchObservedRunningTime="2025-12-06 05:21:37.851974234 +0000 UTC m=+120.179798218" Dec 06 05:21:37 crc kubenswrapper[4706]: E1206 05:21:37.984288 4706 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Dec 06 05:21:38 crc kubenswrapper[4706]: I1206 05:21:38.035617 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:38 crc kubenswrapper[4706]: I1206 05:21:38.035748 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:38 crc kubenswrapper[4706]: I1206 05:21:38.037891 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:38 crc kubenswrapper[4706]: E1206 05:21:38.037881 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:38 crc kubenswrapper[4706]: E1206 05:21:38.038131 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:38 crc kubenswrapper[4706]: E1206 05:21:38.038425 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:39 crc kubenswrapper[4706]: I1206 05:21:39.036172 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:39 crc kubenswrapper[4706]: E1206 05:21:39.036404 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:40 crc kubenswrapper[4706]: I1206 05:21:40.035536 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:40 crc kubenswrapper[4706]: I1206 05:21:40.035540 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:40 crc kubenswrapper[4706]: E1206 05:21:40.035746 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:40 crc kubenswrapper[4706]: E1206 05:21:40.036033 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:40 crc kubenswrapper[4706]: I1206 05:21:40.036837 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:40 crc kubenswrapper[4706]: E1206 05:21:40.036938 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:41 crc kubenswrapper[4706]: I1206 05:21:41.035799 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:41 crc kubenswrapper[4706]: E1206 05:21:41.036287 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:42 crc kubenswrapper[4706]: I1206 05:21:42.035905 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:42 crc kubenswrapper[4706]: I1206 05:21:42.036143 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:42 crc kubenswrapper[4706]: I1206 05:21:42.036187 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:42 crc kubenswrapper[4706]: E1206 05:21:42.036343 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:42 crc kubenswrapper[4706]: I1206 05:21:42.036585 4706 scope.go:117] "RemoveContainer" containerID="5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2" Dec 06 05:21:42 crc kubenswrapper[4706]: E1206 05:21:42.036726 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:42 crc kubenswrapper[4706]: E1206 05:21:42.036812 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:42 crc kubenswrapper[4706]: E1206 05:21:42.036791 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-l5xg7_openshift-ovn-kubernetes(a4bbd5a9-5b78-4e07-b4af-e10d4768de95)\"" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" Dec 06 05:21:42 crc kubenswrapper[4706]: E1206 05:21:42.598441 4706 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 06 05:21:43 crc kubenswrapper[4706]: I1206 05:21:43.035769 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:43 crc kubenswrapper[4706]: E1206 05:21:43.036004 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:43 crc kubenswrapper[4706]: I1206 05:21:43.846124 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rtxrp_f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5/kube-multus/1.log" Dec 06 05:21:43 crc kubenswrapper[4706]: I1206 05:21:43.846956 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rtxrp_f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5/kube-multus/0.log" Dec 06 05:21:43 crc kubenswrapper[4706]: I1206 05:21:43.847088 4706 generic.go:334] "Generic (PLEG): container finished" podID="f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5" containerID="3dedd7c8354756f4eba54307bbb72a153b9c4b8f01bbd97fce12423fd16f3aaf" exitCode=1 Dec 06 05:21:43 crc kubenswrapper[4706]: I1206 05:21:43.847141 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rtxrp" event={"ID":"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5","Type":"ContainerDied","Data":"3dedd7c8354756f4eba54307bbb72a153b9c4b8f01bbd97fce12423fd16f3aaf"} Dec 06 05:21:43 crc kubenswrapper[4706]: I1206 05:21:43.847204 4706 scope.go:117] "RemoveContainer" containerID="c8307b380d5a91974581c8e4e20e71757144a4ce30088d29a6cb44928005e920" Dec 06 05:21:43 crc kubenswrapper[4706]: I1206 05:21:43.847872 4706 scope.go:117] "RemoveContainer" containerID="3dedd7c8354756f4eba54307bbb72a153b9c4b8f01bbd97fce12423fd16f3aaf" Dec 06 05:21:43 crc kubenswrapper[4706]: E1206 05:21:43.848278 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-rtxrp_openshift-multus(f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5)\"" pod="openshift-multus/multus-rtxrp" podUID="f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5" Dec 06 05:21:44 crc kubenswrapper[4706]: I1206 05:21:44.035998 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:44 crc kubenswrapper[4706]: E1206 05:21:44.036144 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:44 crc kubenswrapper[4706]: I1206 05:21:44.036357 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:44 crc kubenswrapper[4706]: E1206 05:21:44.036418 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:44 crc kubenswrapper[4706]: I1206 05:21:44.036498 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:44 crc kubenswrapper[4706]: E1206 05:21:44.036541 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:44 crc kubenswrapper[4706]: I1206 05:21:44.853518 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rtxrp_f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5/kube-multus/1.log" Dec 06 05:21:45 crc kubenswrapper[4706]: I1206 05:21:45.035442 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:45 crc kubenswrapper[4706]: E1206 05:21:45.035646 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:46 crc kubenswrapper[4706]: I1206 05:21:46.035588 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:46 crc kubenswrapper[4706]: I1206 05:21:46.035656 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:46 crc kubenswrapper[4706]: I1206 05:21:46.035767 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:46 crc kubenswrapper[4706]: E1206 05:21:46.035859 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:46 crc kubenswrapper[4706]: E1206 05:21:46.036005 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:46 crc kubenswrapper[4706]: E1206 05:21:46.036103 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:47 crc kubenswrapper[4706]: I1206 05:21:47.035631 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:47 crc kubenswrapper[4706]: E1206 05:21:47.035796 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:47 crc kubenswrapper[4706]: E1206 05:21:47.599882 4706 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 06 05:21:48 crc kubenswrapper[4706]: I1206 05:21:48.036078 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:48 crc kubenswrapper[4706]: I1206 05:21:48.036146 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:48 crc kubenswrapper[4706]: I1206 05:21:48.036308 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:48 crc kubenswrapper[4706]: E1206 05:21:48.037169 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:48 crc kubenswrapper[4706]: E1206 05:21:48.037281 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:48 crc kubenswrapper[4706]: E1206 05:21:48.037383 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:49 crc kubenswrapper[4706]: I1206 05:21:49.036108 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:49 crc kubenswrapper[4706]: E1206 05:21:49.036341 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:50 crc kubenswrapper[4706]: I1206 05:21:50.036187 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:50 crc kubenswrapper[4706]: I1206 05:21:50.036278 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:50 crc kubenswrapper[4706]: I1206 05:21:50.036187 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:50 crc kubenswrapper[4706]: E1206 05:21:50.036493 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:50 crc kubenswrapper[4706]: E1206 05:21:50.036572 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:50 crc kubenswrapper[4706]: E1206 05:21:50.036635 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:51 crc kubenswrapper[4706]: I1206 05:21:51.035983 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:51 crc kubenswrapper[4706]: E1206 05:21:51.036205 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:52 crc kubenswrapper[4706]: I1206 05:21:52.035634 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:52 crc kubenswrapper[4706]: I1206 05:21:52.035840 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:52 crc kubenswrapper[4706]: I1206 05:21:52.035929 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:52 crc kubenswrapper[4706]: E1206 05:21:52.035987 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:52 crc kubenswrapper[4706]: E1206 05:21:52.036206 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:52 crc kubenswrapper[4706]: E1206 05:21:52.036338 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:52 crc kubenswrapper[4706]: E1206 05:21:52.600792 4706 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 06 05:21:53 crc kubenswrapper[4706]: I1206 05:21:53.035579 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:53 crc kubenswrapper[4706]: E1206 05:21:53.035824 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:54 crc kubenswrapper[4706]: I1206 05:21:54.035846 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:54 crc kubenswrapper[4706]: I1206 05:21:54.035917 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:54 crc kubenswrapper[4706]: E1206 05:21:54.036160 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:54 crc kubenswrapper[4706]: I1206 05:21:54.036226 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:54 crc kubenswrapper[4706]: E1206 05:21:54.036436 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:54 crc kubenswrapper[4706]: E1206 05:21:54.036586 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:55 crc kubenswrapper[4706]: I1206 05:21:55.036465 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:55 crc kubenswrapper[4706]: E1206 05:21:55.036661 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:56 crc kubenswrapper[4706]: I1206 05:21:56.039867 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:56 crc kubenswrapper[4706]: I1206 05:21:56.039920 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:56 crc kubenswrapper[4706]: I1206 05:21:56.040287 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:56 crc kubenswrapper[4706]: E1206 05:21:56.040663 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:56 crc kubenswrapper[4706]: E1206 05:21:56.040827 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:56 crc kubenswrapper[4706]: I1206 05:21:56.040894 4706 scope.go:117] "RemoveContainer" containerID="5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2" Dec 06 05:21:56 crc kubenswrapper[4706]: E1206 05:21:56.040966 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:56 crc kubenswrapper[4706]: E1206 05:21:56.041148 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-l5xg7_openshift-ovn-kubernetes(a4bbd5a9-5b78-4e07-b4af-e10d4768de95)\"" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" Dec 06 05:21:57 crc kubenswrapper[4706]: I1206 05:21:57.035781 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:57 crc kubenswrapper[4706]: E1206 05:21:57.036180 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:57 crc kubenswrapper[4706]: E1206 05:21:57.602245 4706 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 06 05:21:58 crc kubenswrapper[4706]: I1206 05:21:58.035230 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:21:58 crc kubenswrapper[4706]: I1206 05:21:58.035213 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:21:58 crc kubenswrapper[4706]: I1206 05:21:58.035321 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:21:58 crc kubenswrapper[4706]: E1206 05:21:58.036504 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:21:58 crc kubenswrapper[4706]: E1206 05:21:58.036708 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:21:58 crc kubenswrapper[4706]: E1206 05:21:58.036918 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:21:59 crc kubenswrapper[4706]: I1206 05:21:59.036097 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:21:59 crc kubenswrapper[4706]: E1206 05:21:59.036444 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:21:59 crc kubenswrapper[4706]: I1206 05:21:59.036692 4706 scope.go:117] "RemoveContainer" containerID="3dedd7c8354756f4eba54307bbb72a153b9c4b8f01bbd97fce12423fd16f3aaf" Dec 06 05:21:59 crc kubenswrapper[4706]: I1206 05:21:59.912616 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rtxrp_f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5/kube-multus/1.log" Dec 06 05:21:59 crc kubenswrapper[4706]: I1206 05:21:59.913079 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rtxrp" event={"ID":"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5","Type":"ContainerStarted","Data":"797e4fef63c5c93dfa90e5d32f66cfdc8814ee8ea24d8c8a5751ba1b17fc9401"} Dec 06 05:22:00 crc kubenswrapper[4706]: I1206 05:22:00.036224 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:22:00 crc kubenswrapper[4706]: I1206 05:22:00.036307 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:22:00 crc kubenswrapper[4706]: I1206 05:22:00.036253 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:22:00 crc kubenswrapper[4706]: E1206 05:22:00.036514 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:22:00 crc kubenswrapper[4706]: E1206 05:22:00.036641 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:22:00 crc kubenswrapper[4706]: E1206 05:22:00.036754 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:22:01 crc kubenswrapper[4706]: I1206 05:22:01.035206 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:22:01 crc kubenswrapper[4706]: E1206 05:22:01.035424 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:22:02 crc kubenswrapper[4706]: I1206 05:22:02.035404 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:22:02 crc kubenswrapper[4706]: E1206 05:22:02.035655 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:22:02 crc kubenswrapper[4706]: I1206 05:22:02.035398 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:22:02 crc kubenswrapper[4706]: I1206 05:22:02.035714 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:22:02 crc kubenswrapper[4706]: E1206 05:22:02.035849 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:22:02 crc kubenswrapper[4706]: E1206 05:22:02.036256 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:22:02 crc kubenswrapper[4706]: E1206 05:22:02.603272 4706 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 06 05:22:03 crc kubenswrapper[4706]: I1206 05:22:03.049074 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:22:03 crc kubenswrapper[4706]: E1206 05:22:03.049234 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:22:04 crc kubenswrapper[4706]: I1206 05:22:04.036017 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:22:04 crc kubenswrapper[4706]: I1206 05:22:04.036097 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:22:04 crc kubenswrapper[4706]: I1206 05:22:04.036097 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:22:04 crc kubenswrapper[4706]: E1206 05:22:04.036217 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:22:04 crc kubenswrapper[4706]: E1206 05:22:04.036362 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:22:04 crc kubenswrapper[4706]: E1206 05:22:04.036432 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:22:05 crc kubenswrapper[4706]: I1206 05:22:05.035103 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:22:05 crc kubenswrapper[4706]: E1206 05:22:05.035273 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:22:06 crc kubenswrapper[4706]: I1206 05:22:06.036118 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:22:06 crc kubenswrapper[4706]: I1206 05:22:06.036164 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:22:06 crc kubenswrapper[4706]: E1206 05:22:06.036280 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:22:06 crc kubenswrapper[4706]: I1206 05:22:06.036486 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:22:06 crc kubenswrapper[4706]: E1206 05:22:06.036556 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:22:06 crc kubenswrapper[4706]: E1206 05:22:06.036690 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:22:07 crc kubenswrapper[4706]: I1206 05:22:07.035197 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:22:07 crc kubenswrapper[4706]: E1206 05:22:07.035432 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:22:07 crc kubenswrapper[4706]: E1206 05:22:07.605324 4706 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 06 05:22:08 crc kubenswrapper[4706]: I1206 05:22:08.049134 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:22:08 crc kubenswrapper[4706]: I1206 05:22:08.049211 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:22:08 crc kubenswrapper[4706]: I1206 05:22:08.049222 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:22:08 crc kubenswrapper[4706]: E1206 05:22:08.050358 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:22:08 crc kubenswrapper[4706]: E1206 05:22:08.050465 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:22:08 crc kubenswrapper[4706]: E1206 05:22:08.050618 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:22:09 crc kubenswrapper[4706]: I1206 05:22:09.035759 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:22:09 crc kubenswrapper[4706]: E1206 05:22:09.036464 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:22:10 crc kubenswrapper[4706]: I1206 05:22:10.041522 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:22:10 crc kubenswrapper[4706]: I1206 05:22:10.041567 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:22:10 crc kubenswrapper[4706]: I1206 05:22:10.041521 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:22:10 crc kubenswrapper[4706]: E1206 05:22:10.041679 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:22:10 crc kubenswrapper[4706]: E1206 05:22:10.041890 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:22:10 crc kubenswrapper[4706]: E1206 05:22:10.041981 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:22:11 crc kubenswrapper[4706]: I1206 05:22:11.035479 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:22:11 crc kubenswrapper[4706]: E1206 05:22:11.036101 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:22:11 crc kubenswrapper[4706]: I1206 05:22:11.036501 4706 scope.go:117] "RemoveContainer" containerID="5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2" Dec 06 05:22:12 crc kubenswrapper[4706]: I1206 05:22:12.035153 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:22:12 crc kubenswrapper[4706]: I1206 05:22:12.035153 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:22:12 crc kubenswrapper[4706]: I1206 05:22:12.035357 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:22:12 crc kubenswrapper[4706]: E1206 05:22:12.035514 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:22:12 crc kubenswrapper[4706]: E1206 05:22:12.035676 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:22:12 crc kubenswrapper[4706]: E1206 05:22:12.035811 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:22:12 crc kubenswrapper[4706]: E1206 05:22:12.606475 4706 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Dec 06 05:22:12 crc kubenswrapper[4706]: I1206 05:22:12.943405 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-4ltjs"] Dec 06 05:22:12 crc kubenswrapper[4706]: I1206 05:22:12.943542 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:22:12 crc kubenswrapper[4706]: E1206 05:22:12.943665 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:22:12 crc kubenswrapper[4706]: I1206 05:22:12.959503 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovnkube-controller/3.log" Dec 06 05:22:12 crc kubenswrapper[4706]: I1206 05:22:12.965471 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerStarted","Data":"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305"} Dec 06 05:22:13 crc kubenswrapper[4706]: I1206 05:22:13.969648 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:22:14 crc kubenswrapper[4706]: I1206 05:22:14.016832 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" podStartSLOduration=128.016800253 podStartE2EDuration="2m8.016800253s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:14.016496395 +0000 UTC m=+156.344320429" watchObservedRunningTime="2025-12-06 05:22:14.016800253 +0000 UTC m=+156.344624287" Dec 06 05:22:14 crc kubenswrapper[4706]: I1206 05:22:14.026241 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:14 crc kubenswrapper[4706]: E1206 05:22:14.026505 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:24:16.026470013 +0000 UTC m=+278.354293997 (durationBeforeRetry 2m2s). 
Dec 06 05:22:14 crc kubenswrapper[4706]: I1206 05:22:14.026650 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 06 05:22:14 crc kubenswrapper[4706]: I1206 05:22:14.026810 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Dec 06 05:22:14 crc kubenswrapper[4706]: E1206 05:22:14.026982 4706 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 06 05:22:14 crc kubenswrapper[4706]: E1206 05:22:14.027168 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-06 05:24:16.027129571 +0000 UTC m=+278.354953545 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Dec 06 05:22:14 crc kubenswrapper[4706]: E1206 05:22:14.027161 4706 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Dec 06 05:22:14 crc kubenswrapper[4706]: E1206 05:22:14.027364 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-12-06 05:24:16.027323296 +0000 UTC m=+278.355147270 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Dec 06 05:22:14 crc kubenswrapper[4706]: I1206 05:22:14.035070 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:22:14 crc kubenswrapper[4706]: I1206 05:22:14.035232 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:22:14 crc kubenswrapper[4706]: E1206 05:22:14.035453 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:22:14 crc kubenswrapper[4706]: I1206 05:22:14.035568 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:22:14 crc kubenswrapper[4706]: E1206 05:22:14.035847 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:22:14 crc kubenswrapper[4706]: E1206 05:22:14.036038 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:22:14 crc kubenswrapper[4706]: I1206 05:22:14.127829 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:22:14 crc kubenswrapper[4706]: I1206 05:22:14.127879 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:22:14 crc kubenswrapper[4706]: E1206 05:22:14.128008 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 06 05:22:14 crc kubenswrapper[4706]: E1206 05:22:14.128024 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 06 05:22:14 crc kubenswrapper[4706]: E1206 05:22:14.128036 4706 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:22:14 crc kubenswrapper[4706]: E1206 05:22:14.128109 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-12-06 05:24:16.128093545 +0000 UTC m=+278.455917489 (durationBeforeRetry 2m2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:22:14 crc kubenswrapper[4706]: E1206 05:22:14.128102 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Dec 06 05:22:14 crc kubenswrapper[4706]: E1206 05:22:14.128149 4706 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Dec 06 05:22:14 crc kubenswrapper[4706]: E1206 05:22:14.128161 4706 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:22:14 crc kubenswrapper[4706]: E1206 05:22:14.128224 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-12-06 05:24:16.128201858 +0000 UTC m=+278.456025862 (durationBeforeRetry 2m2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Dec 06 05:22:15 crc kubenswrapper[4706]: I1206 05:22:15.035690 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:22:15 crc kubenswrapper[4706]: E1206 05:22:15.035976 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:22:16 crc kubenswrapper[4706]: I1206 05:22:16.035723 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:22:16 crc kubenswrapper[4706]: I1206 05:22:16.035773 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:22:16 crc kubenswrapper[4706]: I1206 05:22:16.035885 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:22:16 crc kubenswrapper[4706]: E1206 05:22:16.036557 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Dec 06 05:22:16 crc kubenswrapper[4706]: E1206 05:22:16.036659 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Dec 06 05:22:16 crc kubenswrapper[4706]: E1206 05:22:16.037665 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Dec 06 05:22:17 crc kubenswrapper[4706]: I1206 05:22:17.035464 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:22:17 crc kubenswrapper[4706]: E1206 05:22:17.035672 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-4ltjs" podUID="f4065785-c72e-4c45-ab51-ce292be4f2ed" Dec 06 05:22:18 crc kubenswrapper[4706]: I1206 05:22:18.036042 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:22:18 crc kubenswrapper[4706]: I1206 05:22:18.037530 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:22:18 crc kubenswrapper[4706]: I1206 05:22:18.037665 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:22:18 crc kubenswrapper[4706]: I1206 05:22:18.040167 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Dec 06 05:22:18 crc kubenswrapper[4706]: I1206 05:22:18.040565 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Dec 06 05:22:18 crc kubenswrapper[4706]: I1206 05:22:18.041009 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Dec 06 05:22:18 crc kubenswrapper[4706]: I1206 05:22:18.040236 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Dec 06 05:22:19 crc kubenswrapper[4706]: I1206 05:22:19.036083 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:22:19 crc kubenswrapper[4706]: I1206 05:22:19.040305 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Dec 06 05:22:19 crc kubenswrapper[4706]: I1206 05:22:19.040395 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.844484 4706 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.902971 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-pmvgs"] Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.904195 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.905374 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-k8g95"] Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.906087 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.914119 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-5g2s4"] Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.916507 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.917996 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.918070 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.918382 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.918968 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-5g2s4" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.942188 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt"] Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.942977 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.947593 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.947935 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.948016 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.950520 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-9sxtr"] Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.948145 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.948257 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.948286 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.948370 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.948402 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.948432 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.948487 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.949920 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.950013 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.950066 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.950102 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.950502 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.950663 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.954137 
4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-n6bpb"] Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.954518 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.955184 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9sxtr" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.959882 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-lzm5j"] Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.963019 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-5n589"] Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.966433 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.966801 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-5n589" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.972568 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.973506 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.973694 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.973984 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/445fbc3d-3a2f-4361-8444-badce4d8e564-audit\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.974036 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/445fbc3d-3a2f-4361-8444-badce4d8e564-node-pullsecrets\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.974084 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-serving-cert\") pod \"controller-manager-879f6c89f-pmvgs\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.974106 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-pmvgs\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " 
pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.974125 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/445fbc3d-3a2f-4361-8444-badce4d8e564-trusted-ca-bundle\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.974144 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-client-ca\") pod \"controller-manager-879f6c89f-pmvgs\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.974170 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/445fbc3d-3a2f-4361-8444-badce4d8e564-encryption-config\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.974189 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4230f0fb-f05e-4ae6-9755-db33865a6c33-images\") pod \"machine-api-operator-5694c8668f-5g2s4\" (UID: \"4230f0fb-f05e-4ae6-9755-db33865a6c33\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5g2s4" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.974209 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srdjb\" (UniqueName: \"kubernetes.io/projected/4230f0fb-f05e-4ae6-9755-db33865a6c33-kube-api-access-srdjb\") pod \"machine-api-operator-5694c8668f-5g2s4\" (UID: \"4230f0fb-f05e-4ae6-9755-db33865a6c33\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5g2s4" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.974229 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4btx\" (UniqueName: \"kubernetes.io/projected/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-kube-api-access-z4btx\") pod \"controller-manager-879f6c89f-pmvgs\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.974250 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/445fbc3d-3a2f-4361-8444-badce4d8e564-etcd-serving-ca\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.974272 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/445fbc3d-3a2f-4361-8444-badce4d8e564-image-import-ca\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 
05:22:24.974320 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/445fbc3d-3a2f-4361-8444-badce4d8e564-etcd-client\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.974339 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/445fbc3d-3a2f-4361-8444-badce4d8e564-audit-dir\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.974363 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4230f0fb-f05e-4ae6-9755-db33865a6c33-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-5g2s4\" (UID: \"4230f0fb-f05e-4ae6-9755-db33865a6c33\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5g2s4" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.974383 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/445fbc3d-3a2f-4361-8444-badce4d8e564-config\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.974406 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-config\") pod \"controller-manager-879f6c89f-pmvgs\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.974440 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/445fbc3d-3a2f-4361-8444-badce4d8e564-serving-cert\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.974465 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4230f0fb-f05e-4ae6-9755-db33865a6c33-config\") pod \"machine-api-operator-5694c8668f-5g2s4\" (UID: \"4230f0fb-f05e-4ae6-9755-db33865a6c33\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5g2s4" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.974489 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfs6q\" (UniqueName: \"kubernetes.io/projected/445fbc3d-3a2f-4361-8444-badce4d8e564-kube-api-access-cfs6q\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.977132 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph"] Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 
05:22:24.977843 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bxpjq"] Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.978414 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bxpjq" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.978969 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.980271 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.981176 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.984731 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zvsj5"] Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.987596 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9"] Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.987871 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xnr66"] Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.988162 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-t4xd8"] Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.988465 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-db62k"] Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.988872 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-njll8"] Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.989304 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.989394 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-pmvgs"] Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.990198 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.990403 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.990744 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.990854 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.990972 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.991226 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.991336 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.991515 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.992254 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-jhgqt"] Dec 06 05:22:24 crc kubenswrapper[4706]: I1206 05:22:24.992920 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-xpmpp"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.003730 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-jhgqt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.004145 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.005270 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.008101 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.008311 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.009398 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.009842 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-zvsj5" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:24.981407 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.010142 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.010423 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xnr66" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.014493 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-db62k" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.015164 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.015442 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.015492 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.015559 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.015602 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.015720 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.015724 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.015814 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.015830 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.015561 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.015892 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.015907 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.015831 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.016002 4706 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-cluster-machine-approver"/"machine-approver-config" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.016113 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.016142 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.016219 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.016304 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.016355 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.018856 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.019134 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.025020 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.025578 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.025926 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.026275 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.026363 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.026480 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.026557 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.026593 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.026621 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.026704 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.026771 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Dec 06 05:22:25 crc kubenswrapper[4706]: 
I1206 05:22:25.027260 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.027321 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.027574 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.027757 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.028348 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qld9m"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.028644 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.029652 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5tbsf"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.030238 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qld9m" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.030309 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5tbsf" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.030564 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sj74z"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.031291 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sj74z" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.031899 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-l9d42"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.032546 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.037941 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.038701 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.039498 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.039812 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.039901 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.040072 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.040170 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.042649 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.042871 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.043659 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.043837 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.043923 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.043838 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.044136 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.044646 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.044797 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.044808 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.044861 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.045016 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.045149 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.045668 4706 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.045891 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.045916 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hggv7"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.045939 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.046099 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.045894 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.046531 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hggv7" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.047144 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.047393 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.048875 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.048950 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.049099 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-qwdgg"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.049824 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-qwdgg" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.051749 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wdsds"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.052535 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-5g2s4"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.052685 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wdsds" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.053736 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-25hf6"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.054338 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-25hf6" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.055103 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.055949 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.059772 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.060419 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.060584 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-pzbsr"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.061201 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-pzbsr" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.062183 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-xptzp"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.075172 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.075357 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.077098 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.077440 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.079820 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/445fbc3d-3a2f-4361-8444-badce4d8e564-trusted-ca-bundle\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.079871 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-client-ca\") pod \"controller-manager-879f6c89f-pmvgs\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.079922 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g2zfj\" (UniqueName: \"kubernetes.io/projected/76a85e06-bb22-4260-8a17-639478f9b3ca-kube-api-access-g2zfj\") pod \"console-operator-58897d9998-zvsj5\" (UID: \"76a85e06-bb22-4260-8a17-639478f9b3ca\") " pod="openshift-console-operator/console-operator-58897d9998-zvsj5" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.079952 4706 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/152ec06e-1c86-4db5-87a5-a96da88e008e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-bxpjq\" (UID: \"152ec06e-1c86-4db5-87a5-a96da88e008e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bxpjq" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.079983 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h48l6\" (UniqueName: \"kubernetes.io/projected/d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1-kube-api-access-h48l6\") pod \"cluster-image-registry-operator-dc59b4c8b-64pm9\" (UID: \"d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.080007 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/445fbc3d-3a2f-4361-8444-badce4d8e564-encryption-config\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.080016 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4lmqq"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.080182 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/76a85e06-bb22-4260-8a17-639478f9b3ca-serving-cert\") pod \"console-operator-58897d9998-zvsj5\" (UID: \"76a85e06-bb22-4260-8a17-639478f9b3ca\") " pod="openshift-console-operator/console-operator-58897d9998-zvsj5" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.080220 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4230f0fb-f05e-4ae6-9755-db33865a6c33-images\") pod \"machine-api-operator-5694c8668f-5g2s4\" (UID: \"4230f0fb-f05e-4ae6-9755-db33865a6c33\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5g2s4" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.080350 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-64pm9\" (UID: \"d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.084120 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4230f0fb-f05e-4ae6-9755-db33865a6c33-images\") pod \"machine-api-operator-5694c8668f-5g2s4\" (UID: \"4230f0fb-f05e-4ae6-9755-db33865a6c33\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5g2s4" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.084776 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/445fbc3d-3a2f-4361-8444-badce4d8e564-trusted-ca-bundle\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc 
kubenswrapper[4706]: I1206 05:22:25.085767 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/58a1210d-91bd-4a47-b70a-c8026a238565-available-featuregates\") pod \"openshift-config-operator-7777fb866f-5n589\" (UID: \"58a1210d-91bd-4a47-b70a-c8026a238565\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-5n589" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.085832 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srdjb\" (UniqueName: \"kubernetes.io/projected/4230f0fb-f05e-4ae6-9755-db33865a6c33-kube-api-access-srdjb\") pod \"machine-api-operator-5694c8668f-5g2s4\" (UID: \"4230f0fb-f05e-4ae6-9755-db33865a6c33\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5g2s4" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.085861 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4btx\" (UniqueName: \"kubernetes.io/projected/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-kube-api-access-z4btx\") pod \"controller-manager-879f6c89f-pmvgs\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.088267 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/445fbc3d-3a2f-4361-8444-badce4d8e564-etcd-serving-ca\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.090599 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-client-ca\") pod \"controller-manager-879f6c89f-pmvgs\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091105 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091123 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-64pm9\" (UID: \"d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091177 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/445fbc3d-3a2f-4361-8444-badce4d8e564-image-import-ca\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091244 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/445fbc3d-3a2f-4361-8444-badce4d8e564-etcd-client\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc 
kubenswrapper[4706]: I1206 05:22:25.091270 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58a1210d-91bd-4a47-b70a-c8026a238565-serving-cert\") pod \"openshift-config-operator-7777fb866f-5n589\" (UID: \"58a1210d-91bd-4a47-b70a-c8026a238565\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-5n589" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091294 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-64pm9\" (UID: \"d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091321 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/445fbc3d-3a2f-4361-8444-badce4d8e564-audit-dir\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091344 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96qp2\" (UniqueName: \"kubernetes.io/projected/152ec06e-1c86-4db5-87a5-a96da88e008e-kube-api-access-96qp2\") pod \"openshift-apiserver-operator-796bbdcf4f-bxpjq\" (UID: \"152ec06e-1c86-4db5-87a5-a96da88e008e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bxpjq" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091376 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4230f0fb-f05e-4ae6-9755-db33865a6c33-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-5g2s4\" (UID: \"4230f0fb-f05e-4ae6-9755-db33865a6c33\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5g2s4" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091412 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/152ec06e-1c86-4db5-87a5-a96da88e008e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-bxpjq\" (UID: \"152ec06e-1c86-4db5-87a5-a96da88e008e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bxpjq" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091443 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/445fbc3d-3a2f-4361-8444-badce4d8e564-config\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091466 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-config\") pod \"controller-manager-879f6c89f-pmvgs\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091502 4706 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/76a85e06-bb22-4260-8a17-639478f9b3ca-trusted-ca\") pod \"console-operator-58897d9998-zvsj5\" (UID: \"76a85e06-bb22-4260-8a17-639478f9b3ca\") " pod="openshift-console-operator/console-operator-58897d9998-zvsj5" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091533 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a68f594e-b151-4902-9792-b5d6051525dd-config\") pod \"kube-controller-manager-operator-78b949d7b-sj74z\" (UID: \"a68f594e-b151-4902-9792-b5d6051525dd\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sj74z" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091563 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/445fbc3d-3a2f-4361-8444-badce4d8e564-serving-cert\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091585 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4230f0fb-f05e-4ae6-9755-db33865a6c33-config\") pod \"machine-api-operator-5694c8668f-5g2s4\" (UID: \"4230f0fb-f05e-4ae6-9755-db33865a6c33\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5g2s4" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091614 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfs6q\" (UniqueName: \"kubernetes.io/projected/445fbc3d-3a2f-4361-8444-badce4d8e564-kube-api-access-cfs6q\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091648 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/445fbc3d-3a2f-4361-8444-badce4d8e564-audit\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091684 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/445fbc3d-3a2f-4361-8444-badce4d8e564-node-pullsecrets\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091706 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a68f594e-b151-4902-9792-b5d6051525dd-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-sj74z\" (UID: \"a68f594e-b151-4902-9792-b5d6051525dd\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sj74z" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091729 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a68f594e-b151-4902-9792-b5d6051525dd-kube-api-access\") pod 
\"kube-controller-manager-operator-78b949d7b-sj74z\" (UID: \"a68f594e-b151-4902-9792-b5d6051525dd\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sj74z" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091753 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-serving-cert\") pod \"controller-manager-879f6c89f-pmvgs\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091775 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76a85e06-bb22-4260-8a17-639478f9b3ca-config\") pod \"console-operator-58897d9998-zvsj5\" (UID: \"76a85e06-bb22-4260-8a17-639478f9b3ca\") " pod="openshift-console-operator/console-operator-58897d9998-zvsj5" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091802 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-pmvgs\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091828 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlpjk\" (UniqueName: \"kubernetes.io/projected/58a1210d-91bd-4a47-b70a-c8026a238565-kube-api-access-hlpjk\") pod \"openshift-config-operator-7777fb866f-5n589\" (UID: \"58a1210d-91bd-4a47-b70a-c8026a238565\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-5n589" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.091913 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/445fbc3d-3a2f-4361-8444-badce4d8e564-etcd-serving-ca\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.092251 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.092290 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/445fbc3d-3a2f-4361-8444-badce4d8e564-image-import-ca\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.092375 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/445fbc3d-3a2f-4361-8444-badce4d8e564-audit-dir\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.092524 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/445fbc3d-3a2f-4361-8444-badce4d8e564-node-pullsecrets\") pod 
\"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.094592 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-9gzzw"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.094947 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/445fbc3d-3a2f-4361-8444-badce4d8e564-audit\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.095283 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4230f0fb-f05e-4ae6-9755-db33865a6c33-config\") pod \"machine-api-operator-5694c8668f-5g2s4\" (UID: \"4230f0fb-f05e-4ae6-9755-db33865a6c33\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5g2s4" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.095356 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-9gzzw" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.095678 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-config\") pod \"controller-manager-879f6c89f-pmvgs\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.096011 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-pmvgs\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.096887 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/445fbc3d-3a2f-4361-8444-badce4d8e564-config\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.097228 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4lmqq" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.099284 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/445fbc3d-3a2f-4361-8444-badce4d8e564-serving-cert\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.100689 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jgqp9"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.100727 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/445fbc3d-3a2f-4361-8444-badce4d8e564-encryption-config\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.101606 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jgqp9" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.102898 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-serving-cert\") pod \"controller-manager-879f6c89f-pmvgs\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.103206 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-kcwkb"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.104009 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-kcwkb" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.105481 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.106341 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.114320 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-6xms4"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.114937 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-vfwnb"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.115428 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-bx4tb"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.115729 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xnr66"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.115835 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-bx4tb" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.116240 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.116373 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-6xms4" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.116499 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vfwnb" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.116504 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-t4xd8"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.120118 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.120171 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.120213 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/445fbc3d-3a2f-4361-8444-badce4d8e564-etcd-client\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.120887 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-njll8"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.121310 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.122210 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-k8g95"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.123196 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/4230f0fb-f05e-4ae6-9755-db33865a6c33-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-5g2s4\" (UID: \"4230f0fb-f05e-4ae6-9755-db33865a6c33\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5g2s4" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.124510 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-5n589"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.125351 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zvsj5"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.126695 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.127132 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.128305 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.129993 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hggv7"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.131100 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sj74z"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.133382 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-lzm5j"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.134432 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wdsds"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.135948 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bxpjq"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.137299 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-xpmpp"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.139502 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-vfwnb"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.141721 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-jhgqt"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.143262 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-n6bpb"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.146983 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.146997 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-db62k"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.149705 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5tbsf"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.150674 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-9gzzw"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.152562 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-qwdgg"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.154322 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jgqp9"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.156415 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.158281 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-pzbsr"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.160114 4706 kubelet.go:2428] "SyncLoop UPDATE" 
source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-25hf6"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.161841 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.163160 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qld9m"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.164087 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-xptzp"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.165145 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-jk2pn"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.168180 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-4gjwk"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.168460 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-jk2pn" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.168634 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.172134 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-kcwkb"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.172168 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-bx4tb"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.172180 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.172287 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.174356 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-6xms4"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.177837 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4lmqq"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.180261 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-jk2pn"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.183418 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-4gjwk"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.186651 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.186715 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-gm4d4"] Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.187358 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-gm4d4" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.192611 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlpjk\" (UniqueName: \"kubernetes.io/projected/58a1210d-91bd-4a47-b70a-c8026a238565-kube-api-access-hlpjk\") pod \"openshift-config-operator-7777fb866f-5n589\" (UID: \"58a1210d-91bd-4a47-b70a-c8026a238565\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-5n589" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.192765 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g2zfj\" (UniqueName: \"kubernetes.io/projected/76a85e06-bb22-4260-8a17-639478f9b3ca-kube-api-access-g2zfj\") pod \"console-operator-58897d9998-zvsj5\" (UID: \"76a85e06-bb22-4260-8a17-639478f9b3ca\") " pod="openshift-console-operator/console-operator-58897d9998-zvsj5" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.192877 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h48l6\" (UniqueName: \"kubernetes.io/projected/d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1-kube-api-access-h48l6\") pod \"cluster-image-registry-operator-dc59b4c8b-64pm9\" (UID: \"d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.192977 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/152ec06e-1c86-4db5-87a5-a96da88e008e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-bxpjq\" (UID: \"152ec06e-1c86-4db5-87a5-a96da88e008e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bxpjq" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.193109 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/76a85e06-bb22-4260-8a17-639478f9b3ca-serving-cert\") pod \"console-operator-58897d9998-zvsj5\" (UID: \"76a85e06-bb22-4260-8a17-639478f9b3ca\") " pod="openshift-console-operator/console-operator-58897d9998-zvsj5" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.193251 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-64pm9\" (UID: \"d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.193374 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/58a1210d-91bd-4a47-b70a-c8026a238565-available-featuregates\") pod \"openshift-config-operator-7777fb866f-5n589\" (UID: \"58a1210d-91bd-4a47-b70a-c8026a238565\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-5n589" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.193519 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-64pm9\" (UID: \"d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.193659 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58a1210d-91bd-4a47-b70a-c8026a238565-serving-cert\") pod \"openshift-config-operator-7777fb866f-5n589\" (UID: \"58a1210d-91bd-4a47-b70a-c8026a238565\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-5n589" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.193823 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-64pm9\" (UID: \"d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.193930 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96qp2\" (UniqueName: \"kubernetes.io/projected/152ec06e-1c86-4db5-87a5-a96da88e008e-kube-api-access-96qp2\") pod \"openshift-apiserver-operator-796bbdcf4f-bxpjq\" (UID: \"152ec06e-1c86-4db5-87a5-a96da88e008e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bxpjq" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.194029 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/152ec06e-1c86-4db5-87a5-a96da88e008e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-bxpjq\" (UID: \"152ec06e-1c86-4db5-87a5-a96da88e008e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bxpjq" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.194499 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/76a85e06-bb22-4260-8a17-639478f9b3ca-trusted-ca\") pod \"console-operator-58897d9998-zvsj5\" (UID: \"76a85e06-bb22-4260-8a17-639478f9b3ca\") " pod="openshift-console-operator/console-operator-58897d9998-zvsj5" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.193887 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/58a1210d-91bd-4a47-b70a-c8026a238565-available-featuregates\") pod \"openshift-config-operator-7777fb866f-5n589\" (UID: \"58a1210d-91bd-4a47-b70a-c8026a238565\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-5n589" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.195651 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/76a85e06-bb22-4260-8a17-639478f9b3ca-trusted-ca\") pod \"console-operator-58897d9998-zvsj5\" (UID: \"76a85e06-bb22-4260-8a17-639478f9b3ca\") " pod="openshift-console-operator/console-operator-58897d9998-zvsj5" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.195860 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-64pm9\" (UID: \"d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9" Dec 06 05:22:25 
crc kubenswrapper[4706]: I1206 05:22:25.196287 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58a1210d-91bd-4a47-b70a-c8026a238565-serving-cert\") pod \"openshift-config-operator-7777fb866f-5n589\" (UID: \"58a1210d-91bd-4a47-b70a-c8026a238565\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-5n589" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.196552 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/152ec06e-1c86-4db5-87a5-a96da88e008e-config\") pod \"openshift-apiserver-operator-796bbdcf4f-bxpjq\" (UID: \"152ec06e-1c86-4db5-87a5-a96da88e008e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bxpjq" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.194657 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a68f594e-b151-4902-9792-b5d6051525dd-config\") pod \"kube-controller-manager-operator-78b949d7b-sj74z\" (UID: \"a68f594e-b151-4902-9792-b5d6051525dd\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sj74z" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.196923 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a68f594e-b151-4902-9792-b5d6051525dd-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-sj74z\" (UID: \"a68f594e-b151-4902-9792-b5d6051525dd\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sj74z" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.197255 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a68f594e-b151-4902-9792-b5d6051525dd-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-sj74z\" (UID: \"a68f594e-b151-4902-9792-b5d6051525dd\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sj74z" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.197465 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76a85e06-bb22-4260-8a17-639478f9b3ca-config\") pod \"console-operator-58897d9998-zvsj5\" (UID: \"76a85e06-bb22-4260-8a17-639478f9b3ca\") " pod="openshift-console-operator/console-operator-58897d9998-zvsj5" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.198038 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-64pm9\" (UID: \"d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.198180 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/76a85e06-bb22-4260-8a17-639478f9b3ca-config\") pod \"console-operator-58897d9998-zvsj5\" (UID: \"76a85e06-bb22-4260-8a17-639478f9b3ca\") " pod="openshift-console-operator/console-operator-58897d9998-zvsj5" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.200391 4706 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/152ec06e-1c86-4db5-87a5-a96da88e008e-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-bxpjq\" (UID: \"152ec06e-1c86-4db5-87a5-a96da88e008e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bxpjq" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.206760 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.208472 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/76a85e06-bb22-4260-8a17-639478f9b3ca-serving-cert\") pod \"console-operator-58897d9998-zvsj5\" (UID: \"76a85e06-bb22-4260-8a17-639478f9b3ca\") " pod="openshift-console-operator/console-operator-58897d9998-zvsj5" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.226546 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.247194 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.254283 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.266230 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.287260 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.307041 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.328731 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.348365 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.366777 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.393578 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.408198 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.427723 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.446369 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.466851 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.486925 
4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.507899 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.527889 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.547343 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.567711 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.586859 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.607205 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.627038 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.636485 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a68f594e-b151-4902-9792-b5d6051525dd-config\") pod \"kube-controller-manager-operator-78b949d7b-sj74z\" (UID: \"a68f594e-b151-4902-9792-b5d6051525dd\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sj74z" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.648372 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.667633 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.681539 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a68f594e-b151-4902-9792-b5d6051525dd-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-sj74z\" (UID: \"a68f594e-b151-4902-9792-b5d6051525dd\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sj74z" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.687755 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.707560 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.726947 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.746573 4706 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.766615 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.786597 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.808446 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.826753 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.847735 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.867778 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.907247 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.927247 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.947743 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.966907 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Dec 06 05:22:25 crc kubenswrapper[4706]: I1206 05:22:25.987248 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.028423 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.047225 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.065915 4706 request.go:700] Waited for 1.01287831s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-api/secrets?fieldSelector=metadata.name%3Dcontrol-plane-machine-set-operator-dockercfg-k9rxt&limit=500&resourceVersion=0 Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.068292 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.087556 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.108210 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Dec 06 05:22:26 crc 
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.146474 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.167593 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.187310 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.208140 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.227507 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.247576 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.267897 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.309024 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.310338 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.355634 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srdjb\" (UniqueName: \"kubernetes.io/projected/4230f0fb-f05e-4ae6-9755-db33865a6c33-kube-api-access-srdjb\") pod \"machine-api-operator-5694c8668f-5g2s4\" (UID: \"4230f0fb-f05e-4ae6-9755-db33865a6c33\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5g2s4"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.371171 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4btx\" (UniqueName: \"kubernetes.io/projected/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-kube-api-access-z4btx\") pod \"controller-manager-879f6c89f-pmvgs\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.384225 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfs6q\" (UniqueName: \"kubernetes.io/projected/445fbc3d-3a2f-4361-8444-badce4d8e564-kube-api-access-cfs6q\") pod \"apiserver-76f77b778f-k8g95\" (UID: \"445fbc3d-3a2f-4361-8444-badce4d8e564\") " pod="openshift-apiserver/apiserver-76f77b778f-k8g95"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.387843 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.407345 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.427529 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.443705 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.446884 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.459791 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-k8g95"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.468160 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.487471 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.508576 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.527983 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.533915 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-5g2s4"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.547713 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.567611 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.588245 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.608348 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.628569 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.648448 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.668983 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.688634 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.707191 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.729085 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.740662 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-k8g95"]
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.748310 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.750527 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-pmvgs"]
Dec 06 05:22:26 crc kubenswrapper[4706]: W1206 05:22:26.758067 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4dfbfadd_8faa_4b55_b8a4_5b5bf8e5c077.slice/crio-94a430b52caaabe00b9c80646b9e3420fd831cf54f69b6c04ce0fa2d65c1a9a8 WatchSource:0}: Error finding container 94a430b52caaabe00b9c80646b9e3420fd831cf54f69b6c04ce0fa2d65c1a9a8: Status 404 returned error can't find the container with id 94a430b52caaabe00b9c80646b9e3420fd831cf54f69b6c04ce0fa2d65c1a9a8
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.767475 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.786416 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.794490 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-5g2s4"]
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.807151 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Dec 06 05:22:26 crc kubenswrapper[4706]: W1206 05:22:26.808566 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4230f0fb_f05e_4ae6_9755_db33865a6c33.slice/crio-7386289ab11970ff9849b0e9e5e860479d4403addef6a78c93402f5980abe47c WatchSource:0}: Error finding container 7386289ab11970ff9849b0e9e5e860479d4403addef6a78c93402f5980abe47c: Status 404 returned error can't find the container with id 7386289ab11970ff9849b0e9e5e860479d4403addef6a78c93402f5980abe47c
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.828684 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.847212 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.867131 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.887309 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.907739 4706 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.926991 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.948011 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.968087 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Dec 06 05:22:26 crc kubenswrapper[4706]: I1206 05:22:26.988011 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.008180 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.038162 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-5g2s4" event={"ID":"4230f0fb-f05e-4ae6-9755-db33865a6c33","Type":"ContainerStarted","Data":"7386289ab11970ff9849b0e9e5e860479d4403addef6a78c93402f5980abe47c"}
Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.040286 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" event={"ID":"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077","Type":"ContainerStarted","Data":"94a430b52caaabe00b9c80646b9e3420fd831cf54f69b6c04ce0fa2d65c1a9a8"}
Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.042450 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-k8g95" event={"ID":"445fbc3d-3a2f-4361-8444-badce4d8e564","Type":"ContainerStarted","Data":"5ae0172eea3eec6c547bd519080b2081df98d6269216804c584dfd68e5a1cd8e"}
Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.047806 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlpjk\" (UniqueName: \"kubernetes.io/projected/58a1210d-91bd-4a47-b70a-c8026a238565-kube-api-access-hlpjk\") pod \"openshift-config-operator-7777fb866f-5n589\" (UID: \"58a1210d-91bd-4a47-b70a-c8026a238565\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-5n589"
Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.063496 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g2zfj\" (UniqueName: \"kubernetes.io/projected/76a85e06-bb22-4260-8a17-639478f9b3ca-kube-api-access-g2zfj\") pod \"console-operator-58897d9998-zvsj5\" (UID: \"76a85e06-bb22-4260-8a17-639478f9b3ca\") " pod="openshift-console-operator/console-operator-58897d9998-zvsj5"
Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.084718 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h48l6\" (UniqueName: \"kubernetes.io/projected/d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1-kube-api-access-h48l6\") pod \"cluster-image-registry-operator-dc59b4c8b-64pm9\" (UID: \"d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9"
Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.084869 4706 request.go:700] Waited for 1.891406365s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/serviceaccounts/cluster-image-registry-operator/token
Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.102339 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-64pm9\" (UID: \"d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9"
Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.135012 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96qp2\" (UniqueName: \"kubernetes.io/projected/152ec06e-1c86-4db5-87a5-a96da88e008e-kube-api-access-96qp2\") pod \"openshift-apiserver-operator-796bbdcf4f-bxpjq\" (UID: \"152ec06e-1c86-4db5-87a5-a96da88e008e\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bxpjq"
Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.150528 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a68f594e-b151-4902-9792-b5d6051525dd-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-sj74z\" (UID: \"a68f594e-b151-4902-9792-b5d6051525dd\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sj74z"
Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.179276 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-5n589"
Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.193826 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bxpjq"
Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.223672 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nlj9g\" (UniqueName: \"kubernetes.io/projected/ed24741b-5476-4f20-bd17-4c8686d40419-kube-api-access-nlj9g\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8"
Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.223740 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cbcef7ec-a2f0-4363-93e6-772d6d35d571-registry-certificates\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.223784 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/019227a7-15fd-4c90-8807-f5aef16b2b10-serving-cert\") pod \"etcd-operator-b45778765-xpmpp\" (UID: \"019227a7-15fd-4c90-8807-f5aef16b2b10\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp"
Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.223825 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/57c94140-5c17-4423-82aa-e62f070fa68c-auth-proxy-config\") pod \"machine-approver-56656f9798-9sxtr\" (UID: \"57c94140-5c17-4423-82aa-e62f070fa68c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9sxtr"
Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.223882 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cbcef7ec-a2f0-4363-93e6-772d6d35d571-ca-trust-extracted\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
\"kubernetes.io/empty-dir/cbcef7ec-a2f0-4363-93e6-772d6d35d571-ca-trust-extracted\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.223915 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/019227a7-15fd-4c90-8807-f5aef16b2b10-config\") pod \"etcd-operator-b45778765-xpmpp\" (UID: \"019227a7-15fd-4c90-8807-f5aef16b2b10\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.223950 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.223991 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cbcef7ec-a2f0-4363-93e6-772d6d35d571-installation-pull-secrets\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224091 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57c94140-5c17-4423-82aa-e62f070fa68c-config\") pod \"machine-approver-56656f9798-9sxtr\" (UID: \"57c94140-5c17-4423-82aa-e62f070fa68c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9sxtr" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224135 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/4edab72e-ed84-4e90-86da-02b3d3aa33bf-proxy-tls\") pod \"machine-config-operator-74547568cd-wr2bq\" (UID: \"4edab72e-ed84-4e90-86da-02b3d3aa33bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224169 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f5a20850-1d32-4041-881c-098e06ecd4f8-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-5tbsf\" (UID: \"f5a20850-1d32-4041-881c-098e06ecd4f8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5tbsf" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224199 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/67f8479e-b919-4de1-8357-2fd41bf205a6-srv-cert\") pod \"catalog-operator-68c6474976-hggv7\" (UID: \"67f8479e-b919-4de1-8357-2fd41bf205a6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hggv7" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224239 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/f05088c1-1548-4c56-8e14-3610540dec5c-serving-cert\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224287 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/de9628dc-df47-4a48-898b-f85d33e59452-stats-auth\") pod \"router-default-5444994796-l9d42\" (UID: \"de9628dc-df47-4a48-898b-f85d33e59452\") " pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224322 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-audit-dir\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224354 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224386 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6f29acc2-2357-4418-9680-e743ccba8702-metrics-tls\") pod \"ingress-operator-5b745b69d9-65j9k\" (UID: \"6f29acc2-2357-4418-9680-e743ccba8702\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224420 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/f05088c1-1548-4c56-8e14-3610540dec5c-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224452 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/33133042-30b9-487e-8ee4-097e0faf7673-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-db62k\" (UID: \"33133042-30b9-487e-8ee4-097e0faf7673\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-db62k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224509 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kp268\" (UniqueName: \"kubernetes.io/projected/6b76376c-f080-4458-a87a-84eab1e4b86d-kube-api-access-kp268\") pod \"route-controller-manager-6576b87f9c-jslph\" (UID: \"6b76376c-f080-4458-a87a-84eab1e4b86d\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224542 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/6f29acc2-2357-4418-9680-e743ccba8702-bound-sa-token\") pod \"ingress-operator-5b745b69d9-65j9k\" (UID: \"6f29acc2-2357-4418-9680-e743ccba8702\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224572 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-audit-policies\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224608 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cbcef7ec-a2f0-4363-93e6-772d6d35d571-bound-sa-token\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224643 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/019227a7-15fd-4c90-8807-f5aef16b2b10-etcd-service-ca\") pod \"etcd-operator-b45778765-xpmpp\" (UID: \"019227a7-15fd-4c90-8807-f5aef16b2b10\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224675 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4edab72e-ed84-4e90-86da-02b3d3aa33bf-auth-proxy-config\") pod \"machine-config-operator-74547568cd-wr2bq\" (UID: \"4edab72e-ed84-4e90-86da-02b3d3aa33bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224709 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dw22m\" (UniqueName: \"kubernetes.io/projected/de9628dc-df47-4a48-898b-f85d33e59452-kube-api-access-dw22m\") pod \"router-default-5444994796-l9d42\" (UID: \"de9628dc-df47-4a48-898b-f85d33e59452\") " pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224742 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f05088c1-1548-4c56-8e14-3610540dec5c-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224774 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f05088c1-1548-4c56-8e14-3610540dec5c-audit-policies\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224804 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/019227a7-15fd-4c90-8807-f5aef16b2b10-etcd-client\") pod 
\"etcd-operator-b45778765-xpmpp\" (UID: \"019227a7-15fd-4c90-8807-f5aef16b2b10\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224854 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6fdda9bf-9941-4f82-958f-22657d41aa74-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-n6bpb\" (UID: \"6fdda9bf-9941-4f82-958f-22657d41aa74\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224902 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ed24741b-5476-4f20-bd17-4c8686d40419-console-oauth-config\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224936 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/f05088c1-1548-4c56-8e14-3610540dec5c-encryption-config\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.224970 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225004 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ed24741b-5476-4f20-bd17-4c8686d40419-console-serving-cert\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225033 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/de9628dc-df47-4a48-898b-f85d33e59452-default-certificate\") pod \"router-default-5444994796-l9d42\" (UID: \"de9628dc-df47-4a48-898b-f85d33e59452\") " pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225092 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6fdda9bf-9941-4f82-958f-22657d41aa74-service-ca-bundle\") pod \"authentication-operator-69f744f599-n6bpb\" (UID: \"6fdda9bf-9941-4f82-958f-22657d41aa74\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225127 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25f03df2-7ec0-403c-8b72-1933efc742f5-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-xnr66\" (UID: 
\"25f03df2-7ec0-403c-8b72-1933efc742f5\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xnr66" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225160 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4edab72e-ed84-4e90-86da-02b3d3aa33bf-images\") pod \"machine-config-operator-74547568cd-wr2bq\" (UID: \"4edab72e-ed84-4e90-86da-02b3d3aa33bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225205 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f05088c1-1548-4c56-8e14-3610540dec5c-etcd-client\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225236 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f29acc2-2357-4418-9680-e743ccba8702-trusted-ca\") pod \"ingress-operator-5b745b69d9-65j9k\" (UID: \"6f29acc2-2357-4418-9680-e743ccba8702\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225267 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8g4z5\" (UniqueName: \"kubernetes.io/projected/6fdda9bf-9941-4f82-958f-22657d41aa74-kube-api-access-8g4z5\") pod \"authentication-operator-69f744f599-n6bpb\" (UID: \"6fdda9bf-9941-4f82-958f-22657d41aa74\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225301 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-console-config\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225333 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225364 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cbcef7ec-a2f0-4363-93e6-772d6d35d571-registry-tls\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225394 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/57c94140-5c17-4423-82aa-e62f070fa68c-machine-approver-tls\") pod \"machine-approver-56656f9798-9sxtr\" (UID: \"57c94140-5c17-4423-82aa-e62f070fa68c\") 
" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9sxtr" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225427 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhs5q\" (UniqueName: \"kubernetes.io/projected/019227a7-15fd-4c90-8807-f5aef16b2b10-kube-api-access-xhs5q\") pod \"etcd-operator-b45778765-xpmpp\" (UID: \"019227a7-15fd-4c90-8807-f5aef16b2b10\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225456 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb964fa0-1524-42ab-b399-8e9a7e7e3543-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-qld9m\" (UID: \"eb964fa0-1524-42ab-b399-8e9a7e7e3543\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qld9m" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225489 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pb86p\" (UniqueName: \"kubernetes.io/projected/f5a20850-1d32-4041-881c-098e06ecd4f8-kube-api-access-pb86p\") pod \"kube-storage-version-migrator-operator-b67b599dd-5tbsf\" (UID: \"f5a20850-1d32-4041-881c-098e06ecd4f8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5tbsf" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225522 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225553 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-oauth-serving-cert\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225585 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpbxs\" (UniqueName: \"kubernetes.io/projected/25f03df2-7ec0-403c-8b72-1933efc742f5-kube-api-access-vpbxs\") pod \"openshift-controller-manager-operator-756b6f6bc6-xnr66\" (UID: \"25f03df2-7ec0-403c-8b72-1933efc742f5\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xnr66" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225616 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b76376c-f080-4458-a87a-84eab1e4b86d-config\") pod \"route-controller-manager-6576b87f9c-jslph\" (UID: \"6b76376c-f080-4458-a87a-84eab1e4b86d\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225648 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/6b76376c-f080-4458-a87a-84eab1e4b86d-client-ca\") pod \"route-controller-manager-6576b87f9c-jslph\" (UID: \"6b76376c-f080-4458-a87a-84eab1e4b86d\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225694 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225732 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cbcef7ec-a2f0-4363-93e6-772d6d35d571-trusted-ca\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225764 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/de9628dc-df47-4a48-898b-f85d33e59452-metrics-certs\") pod \"router-default-5444994796-l9d42\" (UID: \"de9628dc-df47-4a48-898b-f85d33e59452\") " pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225799 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vd5m\" (UniqueName: \"kubernetes.io/projected/eba39d45-3292-48d5-be72-9f948b5ff2fe-kube-api-access-9vd5m\") pod \"dns-operator-744455d44c-jhgqt\" (UID: \"eba39d45-3292-48d5-be72-9f948b5ff2fe\") " pod="openshift-dns-operator/dns-operator-744455d44c-jhgqt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225849 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225881 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sghmn\" (UniqueName: \"kubernetes.io/projected/33133042-30b9-487e-8ee4-097e0faf7673-kube-api-access-sghmn\") pod \"cluster-samples-operator-665b6dd947-db62k\" (UID: \"33133042-30b9-487e-8ee4-097e0faf7673\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-db62k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225913 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-djjxn\" (UniqueName: \"kubernetes.io/projected/57c94140-5c17-4423-82aa-e62f070fa68c-kube-api-access-djjxn\") pod \"machine-approver-56656f9798-9sxtr\" (UID: \"57c94140-5c17-4423-82aa-e62f070fa68c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9sxtr" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225947 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jns2h\" 
(UniqueName: \"kubernetes.io/projected/67f8479e-b919-4de1-8357-2fd41bf205a6-kube-api-access-jns2h\") pod \"catalog-operator-68c6474976-hggv7\" (UID: \"67f8479e-b919-4de1-8357-2fd41bf205a6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hggv7" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.225978 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fdda9bf-9941-4f82-958f-22657d41aa74-config\") pod \"authentication-operator-69f744f599-n6bpb\" (UID: \"6fdda9bf-9941-4f82-958f-22657d41aa74\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226011 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jmlj\" (UniqueName: \"kubernetes.io/projected/cbcef7ec-a2f0-4363-93e6-772d6d35d571-kube-api-access-9jmlj\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226102 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226134 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b76376c-f080-4458-a87a-84eab1e4b86d-serving-cert\") pod \"route-controller-manager-6576b87f9c-jslph\" (UID: \"6b76376c-f080-4458-a87a-84eab1e4b86d\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226164 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6fdda9bf-9941-4f82-958f-22657d41aa74-serving-cert\") pod \"authentication-operator-69f744f599-n6bpb\" (UID: \"6fdda9bf-9941-4f82-958f-22657d41aa74\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226220 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwclk\" (UniqueName: \"kubernetes.io/projected/4edab72e-ed84-4e90-86da-02b3d3aa33bf-kube-api-access-wwclk\") pod \"machine-config-operator-74547568cd-wr2bq\" (UID: \"4edab72e-ed84-4e90-86da-02b3d3aa33bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226255 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/67f8479e-b919-4de1-8357-2fd41bf205a6-profile-collector-cert\") pod \"catalog-operator-68c6474976-hggv7\" (UID: \"67f8479e-b919-4de1-8357-2fd41bf205a6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hggv7" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226300 4706 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7sdt5\" (UniqueName: \"kubernetes.io/projected/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-kube-api-access-7sdt5\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226336 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25f03df2-7ec0-403c-8b72-1933efc742f5-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-xnr66\" (UID: \"25f03df2-7ec0-403c-8b72-1933efc742f5\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xnr66" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226369 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/eba39d45-3292-48d5-be72-9f948b5ff2fe-metrics-tls\") pod \"dns-operator-744455d44c-jhgqt\" (UID: \"eba39d45-3292-48d5-be72-9f948b5ff2fe\") " pod="openshift-dns-operator/dns-operator-744455d44c-jhgqt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226402 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226465 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226499 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hjnjp\" (UniqueName: \"kubernetes.io/projected/f05088c1-1548-4c56-8e14-3610540dec5c-kube-api-access-hjnjp\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226551 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f05088c1-1548-4c56-8e14-3610540dec5c-audit-dir\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226596 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5a20850-1d32-4041-881c-098e06ecd4f8-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-5tbsf\" (UID: \"f5a20850-1d32-4041-881c-098e06ecd4f8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5tbsf" Dec 06 05:22:27 crc 
kubenswrapper[4706]: I1206 05:22:27.226627 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-service-ca\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226662 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4h2rs\" (UniqueName: \"kubernetes.io/projected/6f29acc2-2357-4418-9680-e743ccba8702-kube-api-access-4h2rs\") pod \"ingress-operator-5b745b69d9-65j9k\" (UID: \"6f29acc2-2357-4418-9680-e743ccba8702\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226714 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-trusted-ca-bundle\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226786 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb964fa0-1524-42ab-b399-8e9a7e7e3543-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-qld9m\" (UID: \"eb964fa0-1524-42ab-b399-8e9a7e7e3543\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qld9m" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226817 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226851 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/de9628dc-df47-4a48-898b-f85d33e59452-service-ca-bundle\") pod \"router-default-5444994796-l9d42\" (UID: \"de9628dc-df47-4a48-898b-f85d33e59452\") " pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226885 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb964fa0-1524-42ab-b399-8e9a7e7e3543-config\") pod \"kube-apiserver-operator-766d6c64bb-qld9m\" (UID: \"eb964fa0-1524-42ab-b399-8e9a7e7e3543\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qld9m" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226919 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/019227a7-15fd-4c90-8807-f5aef16b2b10-etcd-ca\") pod \"etcd-operator-b45778765-xpmpp\" (UID: \"019227a7-15fd-4c90-8807-f5aef16b2b10\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.226951 4706 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: E1206 05:22:27.230277 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:27.73025579 +0000 UTC m=+170.058079764 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.261813 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-zvsj5" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.274797 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.328777 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:27 crc kubenswrapper[4706]: E1206 05:22:27.329225 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:27.82918752 +0000 UTC m=+170.157011504 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.329652 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/de9628dc-df47-4a48-898b-f85d33e59452-service-ca-bundle\") pod \"router-default-5444994796-l9d42\" (UID: \"de9628dc-df47-4a48-898b-f85d33e59452\") " pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.329711 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e9405376-0114-4bee-b245-f17b30f2594a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-xptzp\" (UID: \"e9405376-0114-4bee-b245-f17b30f2594a\") " pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.329752 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmvf7\" (UniqueName: \"kubernetes.io/projected/23fe0ae4-ae9d-4470-871f-fb431c6c6c80-kube-api-access-dmvf7\") pod \"machine-config-server-gm4d4\" (UID: \"23fe0ae4-ae9d-4470-871f-fb431c6c6c80\") " pod="openshift-machine-config-operator/machine-config-server-gm4d4" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.329796 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb964fa0-1524-42ab-b399-8e9a7e7e3543-config\") pod \"kube-apiserver-operator-766d6c64bb-qld9m\" (UID: \"eb964fa0-1524-42ab-b399-8e9a7e7e3543\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qld9m" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.330702 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb964fa0-1524-42ab-b399-8e9a7e7e3543-config\") pod \"kube-apiserver-operator-766d6c64bb-qld9m\" (UID: \"eb964fa0-1524-42ab-b399-8e9a7e7e3543\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qld9m" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.331379 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/de9628dc-df47-4a48-898b-f85d33e59452-service-ca-bundle\") pod \"router-default-5444994796-l9d42\" (UID: \"de9628dc-df47-4a48-898b-f85d33e59452\") " pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.331456 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a417f08a-e64f-4a02-abb3-bee2049eb2e7-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-wdsds\" (UID: \"a417f08a-e64f-4a02-abb3-bee2049eb2e7\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wdsds" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 
05:22:27.331504 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/3a68cbce-a0d0-4128-b5fc-ba2664947314-mountpoint-dir\") pod \"csi-hostpathplugin-4gjwk\" (UID: \"3a68cbce-a0d0-4128-b5fc-ba2664947314\") " pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.331544 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.331580 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/52ee1c0b-d021-43e3-a982-268e0af6f331-srv-cert\") pod \"olm-operator-6b444d44fb-pzbsr\" (UID: \"52ee1c0b-d021-43e3-a982-268e0af6f331\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-pzbsr" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.331645 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/019227a7-15fd-4c90-8807-f5aef16b2b10-etcd-ca\") pod \"etcd-operator-b45778765-xpmpp\" (UID: \"019227a7-15fd-4c90-8807-f5aef16b2b10\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.331702 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e50331c5-c197-47f6-a27d-abd4fd31410f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jgqp9\" (UID: \"e50331c5-c197-47f6-a27d-abd4fd31410f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jgqp9" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.331750 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4f34c9f6-5366-412e-a8b7-93837b5ea428-apiservice-cert\") pod \"packageserver-d55dfcdfc-mkj59\" (UID: \"4f34c9f6-5366-412e-a8b7-93837b5ea428\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.331911 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cbcef7ec-a2f0-4363-93e6-772d6d35d571-registry-certificates\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.331950 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nlj9g\" (UniqueName: \"kubernetes.io/projected/ed24741b-5476-4f20-bd17-4c8686d40419-kube-api-access-nlj9g\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.331987 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/019227a7-15fd-4c90-8807-f5aef16b2b10-serving-cert\") pod \"etcd-operator-b45778765-xpmpp\" (UID: \"019227a7-15fd-4c90-8807-f5aef16b2b10\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332023 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8578q\" (UniqueName: \"kubernetes.io/projected/a2b02aaa-3dd3-462e-9dd6-c69748bc8511-kube-api-access-8578q\") pod \"machine-config-controller-84d6567774-vfwnb\" (UID: \"a2b02aaa-3dd3-462e-9dd6-c69748bc8511\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vfwnb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332116 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/11e93d60-f22e-4b73-b41c-72a9b55e4ff5-config\") pod \"service-ca-operator-777779d784-kcwkb\" (UID: \"11e93d60-f22e-4b73-b41c-72a9b55e4ff5\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-kcwkb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332155 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cbcef7ec-a2f0-4363-93e6-772d6d35d571-ca-trust-extracted\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332194 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/019227a7-15fd-4c90-8807-f5aef16b2b10-config\") pod \"etcd-operator-b45778765-xpmpp\" (UID: \"019227a7-15fd-4c90-8807-f5aef16b2b10\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332235 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/57c94140-5c17-4423-82aa-e62f070fa68c-auth-proxy-config\") pod \"machine-approver-56656f9798-9sxtr\" (UID: \"57c94140-5c17-4423-82aa-e62f070fa68c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9sxtr" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332295 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332330 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cbcef7ec-a2f0-4363-93e6-772d6d35d571-installation-pull-secrets\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332365 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sddld\" (UniqueName: \"kubernetes.io/projected/bd187a9c-688a-463f-a84a-6fb7c1df0360-kube-api-access-sddld\") pod 
\"package-server-manager-789f6589d5-4lmqq\" (UID: \"bd187a9c-688a-463f-a84a-6fb7c1df0360\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4lmqq" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332449 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57c94140-5c17-4423-82aa-e62f070fa68c-config\") pod \"machine-approver-56656f9798-9sxtr\" (UID: \"57c94140-5c17-4423-82aa-e62f070fa68c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9sxtr" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332486 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/4edab72e-ed84-4e90-86da-02b3d3aa33bf-proxy-tls\") pod \"machine-config-operator-74547568cd-wr2bq\" (UID: \"4edab72e-ed84-4e90-86da-02b3d3aa33bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332536 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f5a20850-1d32-4041-881c-098e06ecd4f8-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-5tbsf\" (UID: \"f5a20850-1d32-4041-881c-098e06ecd4f8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5tbsf" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332570 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/67f8479e-b919-4de1-8357-2fd41bf205a6-srv-cert\") pod \"catalog-operator-68c6474976-hggv7\" (UID: \"67f8479e-b919-4de1-8357-2fd41bf205a6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hggv7" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332606 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nt7mf\" (UniqueName: \"kubernetes.io/projected/cba73644-0f32-4d53-9c68-e98d52909f9a-kube-api-access-nt7mf\") pod \"migrator-59844c95c7-25hf6\" (UID: \"cba73644-0f32-4d53-9c68-e98d52909f9a\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-25hf6" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332645 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/52ee1c0b-d021-43e3-a982-268e0af6f331-profile-collector-cert\") pod \"olm-operator-6b444d44fb-pzbsr\" (UID: \"52ee1c0b-d021-43e3-a982-268e0af6f331\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-pzbsr" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332680 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jm2q\" (UniqueName: \"kubernetes.io/projected/28ae28d5-433c-4ce7-bb6e-2532d65b354d-kube-api-access-2jm2q\") pod \"multus-admission-controller-857f4d67dd-qwdgg\" (UID: \"28ae28d5-433c-4ce7-bb6e-2532d65b354d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qwdgg" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332718 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96rlc\" (UniqueName: \"kubernetes.io/projected/a417f08a-e64f-4a02-abb3-bee2049eb2e7-kube-api-access-96rlc\") pod 
\"control-plane-machine-set-operator-78cbb6b69f-wdsds\" (UID: \"a417f08a-e64f-4a02-abb3-bee2049eb2e7\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wdsds" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332750 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/28ae28d5-433c-4ce7-bb6e-2532d65b354d-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-qwdgg\" (UID: \"28ae28d5-433c-4ce7-bb6e-2532d65b354d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qwdgg" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332782 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a2b02aaa-3dd3-462e-9dd6-c69748bc8511-proxy-tls\") pod \"machine-config-controller-84d6567774-vfwnb\" (UID: \"a2b02aaa-3dd3-462e-9dd6-c69748bc8511\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vfwnb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332925 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f05088c1-1548-4c56-8e14-3610540dec5c-serving-cert\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.332995 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/43dddc60-c6c0-48bb-9888-6cfb66efd812-signing-cabundle\") pod \"service-ca-9c57cc56f-9gzzw\" (UID: \"43dddc60-c6c0-48bb-9888-6cfb66efd812\") " pod="openshift-service-ca/service-ca-9c57cc56f-9gzzw" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333082 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/4f34c9f6-5366-412e-a8b7-93837b5ea428-tmpfs\") pod \"packageserver-d55dfcdfc-mkj59\" (UID: \"4f34c9f6-5366-412e-a8b7-93837b5ea428\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333176 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/39728d8c-03c4-42d3-999d-1dfe014cfb34-config-volume\") pod \"collect-profiles-29416635-btv7s\" (UID: \"39728d8c-03c4-42d3-999d-1dfe014cfb34\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333220 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/3a68cbce-a0d0-4128-b5fc-ba2664947314-registration-dir\") pod \"csi-hostpathplugin-4gjwk\" (UID: \"3a68cbce-a0d0-4128-b5fc-ba2664947314\") " pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333256 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-audit-dir\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333292 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333326 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/de9628dc-df47-4a48-898b-f85d33e59452-stats-auth\") pod \"router-default-5444994796-l9d42\" (UID: \"de9628dc-df47-4a48-898b-f85d33e59452\") " pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333360 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcsn6\" (UniqueName: \"kubernetes.io/projected/55fbff71-b86d-4b25-9593-b48effb4fb7f-kube-api-access-jcsn6\") pod \"ingress-canary-bx4tb\" (UID: \"55fbff71-b86d-4b25-9593-b48effb4fb7f\") " pod="openshift-ingress-canary/ingress-canary-bx4tb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333395 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6f29acc2-2357-4418-9680-e743ccba8702-metrics-tls\") pod \"ingress-operator-5b745b69d9-65j9k\" (UID: \"6f29acc2-2357-4418-9680-e743ccba8702\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333432 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqdvl\" (UniqueName: \"kubernetes.io/projected/52ee1c0b-d021-43e3-a982-268e0af6f331-kube-api-access-kqdvl\") pod \"olm-operator-6b444d44fb-pzbsr\" (UID: \"52ee1c0b-d021-43e3-a982-268e0af6f331\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-pzbsr" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333467 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/971d28d8-7a3b-4af0-a3e3-9ee9468dbca5-config-volume\") pod \"dns-default-jk2pn\" (UID: \"971d28d8-7a3b-4af0-a3e3-9ee9468dbca5\") " pod="openshift-dns/dns-default-jk2pn" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333504 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/33133042-30b9-487e-8ee4-097e0faf7673-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-db62k\" (UID: \"33133042-30b9-487e-8ee4-097e0faf7673\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-db62k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333541 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/f05088c1-1548-4c56-8e14-3610540dec5c-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333575 4706 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stxqp\" (UniqueName: \"kubernetes.io/projected/43dddc60-c6c0-48bb-9888-6cfb66efd812-kube-api-access-stxqp\") pod \"service-ca-9c57cc56f-9gzzw\" (UID: \"43dddc60-c6c0-48bb-9888-6cfb66efd812\") " pod="openshift-service-ca/service-ca-9c57cc56f-9gzzw" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333619 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kp268\" (UniqueName: \"kubernetes.io/projected/6b76376c-f080-4458-a87a-84eab1e4b86d-kube-api-access-kp268\") pod \"route-controller-manager-6576b87f9c-jslph\" (UID: \"6b76376c-f080-4458-a87a-84eab1e4b86d\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333652 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5b62\" (UniqueName: \"kubernetes.io/projected/971d28d8-7a3b-4af0-a3e3-9ee9468dbca5-kube-api-access-v5b62\") pod \"dns-default-jk2pn\" (UID: \"971d28d8-7a3b-4af0-a3e3-9ee9468dbca5\") " pod="openshift-dns/dns-default-jk2pn" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333697 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s4b75\" (UniqueName: \"kubernetes.io/projected/3a68cbce-a0d0-4128-b5fc-ba2664947314-kube-api-access-s4b75\") pod \"csi-hostpathplugin-4gjwk\" (UID: \"3a68cbce-a0d0-4128-b5fc-ba2664947314\") " pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333731 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6f29acc2-2357-4418-9680-e743ccba8702-bound-sa-token\") pod \"ingress-operator-5b745b69d9-65j9k\" (UID: \"6f29acc2-2357-4418-9680-e743ccba8702\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333774 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-audit-policies\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333811 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/23fe0ae4-ae9d-4470-871f-fb431c6c6c80-certs\") pod \"machine-config-server-gm4d4\" (UID: \"23fe0ae4-ae9d-4470-871f-fb431c6c6c80\") " pod="openshift-machine-config-operator/machine-config-server-gm4d4" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333854 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/55fbff71-b86d-4b25-9593-b48effb4fb7f-cert\") pod \"ingress-canary-bx4tb\" (UID: \"55fbff71-b86d-4b25-9593-b48effb4fb7f\") " pod="openshift-ingress-canary/ingress-canary-bx4tb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333890 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cbcef7ec-a2f0-4363-93e6-772d6d35d571-bound-sa-token\") pod \"image-registry-697d97f7c8-njll8\" (UID: 
\"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333925 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/019227a7-15fd-4c90-8807-f5aef16b2b10-etcd-service-ca\") pod \"etcd-operator-b45778765-xpmpp\" (UID: \"019227a7-15fd-4c90-8807-f5aef16b2b10\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.333966 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4f34c9f6-5366-412e-a8b7-93837b5ea428-webhook-cert\") pod \"packageserver-d55dfcdfc-mkj59\" (UID: \"4f34c9f6-5366-412e-a8b7-93837b5ea428\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.334000 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnxxl\" (UniqueName: \"kubernetes.io/projected/11e93d60-f22e-4b73-b41c-72a9b55e4ff5-kube-api-access-cnxxl\") pod \"service-ca-operator-777779d784-kcwkb\" (UID: \"11e93d60-f22e-4b73-b41c-72a9b55e4ff5\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-kcwkb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.334084 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dw22m\" (UniqueName: \"kubernetes.io/projected/de9628dc-df47-4a48-898b-f85d33e59452-kube-api-access-dw22m\") pod \"router-default-5444994796-l9d42\" (UID: \"de9628dc-df47-4a48-898b-f85d33e59452\") " pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.334121 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f05088c1-1548-4c56-8e14-3610540dec5c-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.334164 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4edab72e-ed84-4e90-86da-02b3d3aa33bf-auth-proxy-config\") pod \"machine-config-operator-74547568cd-wr2bq\" (UID: \"4edab72e-ed84-4e90-86da-02b3d3aa33bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.334211 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/019227a7-15fd-4c90-8807-f5aef16b2b10-etcd-client\") pod \"etcd-operator-b45778765-xpmpp\" (UID: \"019227a7-15fd-4c90-8807-f5aef16b2b10\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.334263 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f05088c1-1548-4c56-8e14-3610540dec5c-audit-policies\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.334306 4706 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/3a68cbce-a0d0-4128-b5fc-ba2664947314-socket-dir\") pod \"csi-hostpathplugin-4gjwk\" (UID: \"3a68cbce-a0d0-4128-b5fc-ba2664947314\") " pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.334387 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ed24741b-5476-4f20-bd17-4c8686d40419-console-oauth-config\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.334416 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.334443 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6fdda9bf-9941-4f82-958f-22657d41aa74-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-n6bpb\" (UID: \"6fdda9bf-9941-4f82-958f-22657d41aa74\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.334712 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/23fe0ae4-ae9d-4470-871f-fb431c6c6c80-node-bootstrap-token\") pod \"machine-config-server-gm4d4\" (UID: \"23fe0ae4-ae9d-4470-871f-fb431c6c6c80\") " pod="openshift-machine-config-operator/machine-config-server-gm4d4" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.334812 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/bd187a9c-688a-463f-a84a-6fb7c1df0360-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-4lmqq\" (UID: \"bd187a9c-688a-463f-a84a-6fb7c1df0360\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4lmqq" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.334999 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/43dddc60-c6c0-48bb-9888-6cfb66efd812-signing-key\") pod \"service-ca-9c57cc56f-9gzzw\" (UID: \"43dddc60-c6c0-48bb-9888-6cfb66efd812\") " pod="openshift-service-ca/service-ca-9c57cc56f-9gzzw" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.335394 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/f05088c1-1548-4c56-8e14-3610540dec5c-encryption-config\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.335514 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zt7tm\" 
(UniqueName: \"kubernetes.io/projected/39728d8c-03c4-42d3-999d-1dfe014cfb34-kube-api-access-zt7tm\") pod \"collect-profiles-29416635-btv7s\" (UID: \"39728d8c-03c4-42d3-999d-1dfe014cfb34\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.335692 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.335764 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-audit-dir\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.335792 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ed24741b-5476-4f20-bd17-4c8686d40419-console-serving-cert\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.335884 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/de9628dc-df47-4a48-898b-f85d33e59452-default-certificate\") pod \"router-default-5444994796-l9d42\" (UID: \"de9628dc-df47-4a48-898b-f85d33e59452\") " pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.335982 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25f03df2-7ec0-403c-8b72-1933efc742f5-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-xnr66\" (UID: \"25f03df2-7ec0-403c-8b72-1933efc742f5\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xnr66" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.335410 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sj74z" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.336306 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/019227a7-15fd-4c90-8807-f5aef16b2b10-config\") pod \"etcd-operator-b45778765-xpmpp\" (UID: \"019227a7-15fd-4c90-8807-f5aef16b2b10\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.336657 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/019227a7-15fd-4c90-8807-f5aef16b2b10-etcd-ca\") pod \"etcd-operator-b45778765-xpmpp\" (UID: \"019227a7-15fd-4c90-8807-f5aef16b2b10\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.336658 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6fdda9bf-9941-4f82-958f-22657d41aa74-service-ca-bundle\") pod \"authentication-operator-69f744f599-n6bpb\" (UID: \"6fdda9bf-9941-4f82-958f-22657d41aa74\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.336769 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-audit-policies\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.336806 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/57c94140-5c17-4423-82aa-e62f070fa68c-auth-proxy-config\") pod \"machine-approver-56656f9798-9sxtr\" (UID: \"57c94140-5c17-4423-82aa-e62f070fa68c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9sxtr" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.336818 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a2b02aaa-3dd3-462e-9dd6-c69748bc8511-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-vfwnb\" (UID: \"a2b02aaa-3dd3-462e-9dd6-c69748bc8511\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vfwnb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.336888 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4edab72e-ed84-4e90-86da-02b3d3aa33bf-images\") pod \"machine-config-operator-74547568cd-wr2bq\" (UID: \"4edab72e-ed84-4e90-86da-02b3d3aa33bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.336937 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/11e93d60-f22e-4b73-b41c-72a9b55e4ff5-serving-cert\") pod \"service-ca-operator-777779d784-kcwkb\" (UID: \"11e93d60-f22e-4b73-b41c-72a9b55e4ff5\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-kcwkb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.336987 4706 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f05088c1-1548-4c56-8e14-3610540dec5c-etcd-client\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.337079 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f29acc2-2357-4418-9680-e743ccba8702-trusted-ca\") pod \"ingress-operator-5b745b69d9-65j9k\" (UID: \"6f29acc2-2357-4418-9680-e743ccba8702\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.337139 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8g4z5\" (UniqueName: \"kubernetes.io/projected/6fdda9bf-9941-4f82-958f-22657d41aa74-kube-api-access-8g4z5\") pod \"authentication-operator-69f744f599-n6bpb\" (UID: \"6fdda9bf-9941-4f82-958f-22657d41aa74\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.337214 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/3a68cbce-a0d0-4128-b5fc-ba2664947314-csi-data-dir\") pod \"csi-hostpathplugin-4gjwk\" (UID: \"3a68cbce-a0d0-4128-b5fc-ba2664947314\") " pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.337302 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-console-config\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.337370 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.337423 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cbcef7ec-a2f0-4363-93e6-772d6d35d571-registry-tls\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.337465 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/57c94140-5c17-4423-82aa-e62f070fa68c-machine-approver-tls\") pod \"machine-approver-56656f9798-9sxtr\" (UID: \"57c94140-5c17-4423-82aa-e62f070fa68c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9sxtr" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.337513 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.337564 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-oauth-serving-cert\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.337922 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpbxs\" (UniqueName: \"kubernetes.io/projected/25f03df2-7ec0-403c-8b72-1933efc742f5-kube-api-access-vpbxs\") pod \"openshift-controller-manager-operator-756b6f6bc6-xnr66\" (UID: \"25f03df2-7ec0-403c-8b72-1933efc742f5\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xnr66" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.338082 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhs5q\" (UniqueName: \"kubernetes.io/projected/019227a7-15fd-4c90-8807-f5aef16b2b10-kube-api-access-xhs5q\") pod \"etcd-operator-b45778765-xpmpp\" (UID: \"019227a7-15fd-4c90-8807-f5aef16b2b10\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.338149 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb964fa0-1524-42ab-b399-8e9a7e7e3543-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-qld9m\" (UID: \"eb964fa0-1524-42ab-b399-8e9a7e7e3543\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qld9m" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.338215 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pb86p\" (UniqueName: \"kubernetes.io/projected/f5a20850-1d32-4041-881c-098e06ecd4f8-kube-api-access-pb86p\") pod \"kube-storage-version-migrator-operator-b67b599dd-5tbsf\" (UID: \"f5a20850-1d32-4041-881c-098e06ecd4f8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5tbsf" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.338314 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b76376c-f080-4458-a87a-84eab1e4b86d-config\") pod \"route-controller-manager-6576b87f9c-jslph\" (UID: \"6b76376c-f080-4458-a87a-84eab1e4b86d\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.338407 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6b76376c-f080-4458-a87a-84eab1e4b86d-client-ca\") pod \"route-controller-manager-6576b87f9c-jslph\" (UID: \"6b76376c-f080-4458-a87a-84eab1e4b86d\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.338506 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/e50331c5-c197-47f6-a27d-abd4fd31410f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jgqp9\" (UID: \"e50331c5-c197-47f6-a27d-abd4fd31410f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jgqp9" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.338625 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.338769 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cbcef7ec-a2f0-4363-93e6-772d6d35d571-trusted-ca\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.338843 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/de9628dc-df47-4a48-898b-f85d33e59452-metrics-certs\") pod \"router-default-5444994796-l9d42\" (UID: \"de9628dc-df47-4a48-898b-f85d33e59452\") " pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.338908 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vd5m\" (UniqueName: \"kubernetes.io/projected/eba39d45-3292-48d5-be72-9f948b5ff2fe-kube-api-access-9vd5m\") pod \"dns-operator-744455d44c-jhgqt\" (UID: \"eba39d45-3292-48d5-be72-9f948b5ff2fe\") " pod="openshift-dns-operator/dns-operator-744455d44c-jhgqt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.338969 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.339016 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jns2h\" (UniqueName: \"kubernetes.io/projected/67f8479e-b919-4de1-8357-2fd41bf205a6-kube-api-access-jns2h\") pod \"catalog-operator-68c6474976-hggv7\" (UID: \"67f8479e-b919-4de1-8357-2fd41bf205a6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hggv7" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.339110 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fdda9bf-9941-4f82-958f-22657d41aa74-config\") pod \"authentication-operator-69f744f599-n6bpb\" (UID: \"6fdda9bf-9941-4f82-958f-22657d41aa74\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.339156 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/3a68cbce-a0d0-4128-b5fc-ba2664947314-plugins-dir\") pod \"csi-hostpathplugin-4gjwk\" (UID: 
\"3a68cbce-a0d0-4128-b5fc-ba2664947314\") " pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.339259 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jmlj\" (UniqueName: \"kubernetes.io/projected/cbcef7ec-a2f0-4363-93e6-772d6d35d571-kube-api-access-9jmlj\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.339359 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sghmn\" (UniqueName: \"kubernetes.io/projected/33133042-30b9-487e-8ee4-097e0faf7673-kube-api-access-sghmn\") pod \"cluster-samples-operator-665b6dd947-db62k\" (UID: \"33133042-30b9-487e-8ee4-097e0faf7673\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-db62k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.339454 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-djjxn\" (UniqueName: \"kubernetes.io/projected/57c94140-5c17-4423-82aa-e62f070fa68c-kube-api-access-djjxn\") pod \"machine-approver-56656f9798-9sxtr\" (UID: \"57c94140-5c17-4423-82aa-e62f070fa68c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9sxtr" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.339554 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jfck\" (UniqueName: \"kubernetes.io/projected/4f34c9f6-5366-412e-a8b7-93837b5ea428-kube-api-access-6jfck\") pod \"packageserver-d55dfcdfc-mkj59\" (UID: \"4f34c9f6-5366-412e-a8b7-93837b5ea428\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.339642 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.339729 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b76376c-f080-4458-a87a-84eab1e4b86d-serving-cert\") pod \"route-controller-manager-6576b87f9c-jslph\" (UID: \"6b76376c-f080-4458-a87a-84eab1e4b86d\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.339844 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwclk\" (UniqueName: \"kubernetes.io/projected/4edab72e-ed84-4e90-86da-02b3d3aa33bf-kube-api-access-wwclk\") pod \"machine-config-operator-74547568cd-wr2bq\" (UID: \"4edab72e-ed84-4e90-86da-02b3d3aa33bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.339945 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6fdda9bf-9941-4f82-958f-22657d41aa74-serving-cert\") pod \"authentication-operator-69f744f599-n6bpb\" (UID: \"6fdda9bf-9941-4f82-958f-22657d41aa74\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.340182 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7sdt5\" (UniqueName: \"kubernetes.io/projected/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-kube-api-access-7sdt5\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.340236 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25f03df2-7ec0-403c-8b72-1933efc742f5-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-xnr66\" (UID: \"25f03df2-7ec0-403c-8b72-1933efc742f5\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xnr66" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.340282 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/67f8479e-b919-4de1-8357-2fd41bf205a6-profile-collector-cert\") pod \"catalog-operator-68c6474976-hggv7\" (UID: \"67f8479e-b919-4de1-8357-2fd41bf205a6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hggv7" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.340335 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/971d28d8-7a3b-4af0-a3e3-9ee9468dbca5-metrics-tls\") pod \"dns-default-jk2pn\" (UID: \"971d28d8-7a3b-4af0-a3e3-9ee9468dbca5\") " pod="openshift-dns/dns-default-jk2pn" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.340386 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.340430 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/eba39d45-3292-48d5-be72-9f948b5ff2fe-metrics-tls\") pod \"dns-operator-744455d44c-jhgqt\" (UID: \"eba39d45-3292-48d5-be72-9f948b5ff2fe\") " pod="openshift-dns-operator/dns-operator-744455d44c-jhgqt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.340474 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssgbq\" (UniqueName: \"kubernetes.io/projected/190e4233-a97e-4af7-8e7e-d66ccf827546-kube-api-access-ssgbq\") pod \"downloads-7954f5f757-6xms4\" (UID: \"190e4233-a97e-4af7-8e7e-d66ccf827546\") " pod="openshift-console/downloads-7954f5f757-6xms4" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.340521 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hjnjp\" (UniqueName: \"kubernetes.io/projected/f05088c1-1548-4c56-8e14-3610540dec5c-kube-api-access-hjnjp\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.340575 4706 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.340649 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e50331c5-c197-47f6-a27d-abd4fd31410f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jgqp9\" (UID: \"e50331c5-c197-47f6-a27d-abd4fd31410f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jgqp9" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.341616 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/019227a7-15fd-4c90-8807-f5aef16b2b10-etcd-service-ca\") pod \"etcd-operator-b45778765-xpmpp\" (UID: \"019227a7-15fd-4c90-8807-f5aef16b2b10\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.342782 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f05088c1-1548-4c56-8e14-3610540dec5c-audit-policies\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.345493 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/25f03df2-7ec0-403c-8b72-1933efc742f5-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-xnr66\" (UID: \"25f03df2-7ec0-403c-8b72-1933efc742f5\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xnr66" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.347757 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/4edab72e-ed84-4e90-86da-02b3d3aa33bf-auth-proxy-config\") pod \"machine-config-operator-74547568cd-wr2bq\" (UID: \"4edab72e-ed84-4e90-86da-02b3d3aa33bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.359579 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f05088c1-1548-4c56-8e14-3610540dec5c-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.346906 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-oauth-serving-cert\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.360862 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: 
\"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.361465 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6f29acc2-2357-4418-9680-e743ccba8702-metrics-tls\") pod \"ingress-operator-5b745b69d9-65j9k\" (UID: \"6f29acc2-2357-4418-9680-e743ccba8702\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.362454 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/de9628dc-df47-4a48-898b-f85d33e59452-stats-auth\") pod \"router-default-5444994796-l9d42\" (UID: \"de9628dc-df47-4a48-898b-f85d33e59452\") " pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.362628 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6fdda9bf-9941-4f82-958f-22657d41aa74-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-n6bpb\" (UID: \"6fdda9bf-9941-4f82-958f-22657d41aa74\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.362703 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.363319 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/33133042-30b9-487e-8ee4-097e0faf7673-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-db62k\" (UID: \"33133042-30b9-487e-8ee4-097e0faf7673\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-db62k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.365922 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/f05088c1-1548-4c56-8e14-3610540dec5c-encryption-config\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.376364 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5a20850-1d32-4041-881c-098e06ecd4f8-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-5tbsf\" (UID: \"f5a20850-1d32-4041-881c-098e06ecd4f8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5tbsf" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.376457 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f05088c1-1548-4c56-8e14-3610540dec5c-audit-dir\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.376617 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-service-ca\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.376742 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/f05088c1-1548-4c56-8e14-3610540dec5c-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.377159 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cbcef7ec-a2f0-4363-93e6-772d6d35d571-trusted-ca\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.378169 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-service-ca\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.378932 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cbcef7ec-a2f0-4363-93e6-772d6d35d571-registry-certificates\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.379167 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f5a20850-1d32-4041-881c-098e06ecd4f8-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-5tbsf\" (UID: \"f5a20850-1d32-4041-881c-098e06ecd4f8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5tbsf" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.379851 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kp268\" (UniqueName: \"kubernetes.io/projected/6b76376c-f080-4458-a87a-84eab1e4b86d-kube-api-access-kp268\") pod \"route-controller-manager-6576b87f9c-jslph\" (UID: \"6b76376c-f080-4458-a87a-84eab1e4b86d\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.383464 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.383718 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/6fdda9bf-9941-4f82-958f-22657d41aa74-serving-cert\") pod \"authentication-operator-69f744f599-n6bpb\" (UID: \"6fdda9bf-9941-4f82-958f-22657d41aa74\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.383881 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-console-config\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.383897 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/67f8479e-b919-4de1-8357-2fd41bf205a6-profile-collector-cert\") pod \"catalog-operator-68c6474976-hggv7\" (UID: \"67f8479e-b919-4de1-8357-2fd41bf205a6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hggv7" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.384441 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ed24741b-5476-4f20-bd17-4c8686d40419-console-serving-cert\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.384590 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.384607 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ed24741b-5476-4f20-bd17-4c8686d40419-console-oauth-config\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.384926 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/019227a7-15fd-4c90-8807-f5aef16b2b10-etcd-client\") pod \"etcd-operator-b45778765-xpmpp\" (UID: \"019227a7-15fd-4c90-8807-f5aef16b2b10\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.385344 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/25f03df2-7ec0-403c-8b72-1933efc742f5-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-xnr66\" (UID: \"25f03df2-7ec0-403c-8b72-1933efc742f5\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xnr66" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.385482 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/eba39d45-3292-48d5-be72-9f948b5ff2fe-metrics-tls\") pod \"dns-operator-744455d44c-jhgqt\" (UID: \"eba39d45-3292-48d5-be72-9f948b5ff2fe\") " pod="openshift-dns-operator/dns-operator-744455d44c-jhgqt" Dec 06 05:22:27 crc 
kubenswrapper[4706]: I1206 05:22:27.385489 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/de9628dc-df47-4a48-898b-f85d33e59452-metrics-certs\") pod \"router-default-5444994796-l9d42\" (UID: \"de9628dc-df47-4a48-898b-f85d33e59452\") " pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.387922 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f05088c1-1548-4c56-8e14-3610540dec5c-audit-dir\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.388181 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/eb964fa0-1524-42ab-b399-8e9a7e7e3543-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-qld9m\" (UID: \"eb964fa0-1524-42ab-b399-8e9a7e7e3543\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qld9m" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.388278 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b76376c-f080-4458-a87a-84eab1e4b86d-serving-cert\") pod \"route-controller-manager-6576b87f9c-jslph\" (UID: \"6b76376c-f080-4458-a87a-84eab1e4b86d\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.388412 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cbcef7ec-a2f0-4363-93e6-772d6d35d571-ca-trust-extracted\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.388592 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/6f29acc2-2357-4418-9680-e743ccba8702-bound-sa-token\") pod \"ingress-operator-5b745b69d9-65j9k\" (UID: \"6f29acc2-2357-4418-9680-e743ccba8702\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.389294 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.389345 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/57c94140-5c17-4423-82aa-e62f070fa68c-config\") pod \"machine-approver-56656f9798-9sxtr\" (UID: \"57c94140-5c17-4423-82aa-e62f070fa68c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9sxtr" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.389895 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-cliconfig\") 
pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.391842 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f5a20850-1d32-4041-881c-098e06ecd4f8-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-5tbsf\" (UID: \"f5a20850-1d32-4041-881c-098e06ecd4f8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5tbsf" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.392843 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.392864 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/4edab72e-ed84-4e90-86da-02b3d3aa33bf-images\") pod \"machine-config-operator-74547568cd-wr2bq\" (UID: \"4edab72e-ed84-4e90-86da-02b3d3aa33bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.393209 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.393324 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.393331 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/67f8479e-b919-4de1-8357-2fd41bf205a6-srv-cert\") pod \"catalog-operator-68c6474976-hggv7\" (UID: \"67f8479e-b919-4de1-8357-2fd41bf205a6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hggv7" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.393587 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e9405376-0114-4bee-b245-f17b30f2594a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-xptzp\" (UID: \"e9405376-0114-4bee-b245-f17b30f2594a\") " pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.393639 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-trusted-ca-bundle\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " 
pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.393667 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4h2rs\" (UniqueName: \"kubernetes.io/projected/6f29acc2-2357-4418-9680-e743ccba8702-kube-api-access-4h2rs\") pod \"ingress-operator-5b745b69d9-65j9k\" (UID: \"6f29acc2-2357-4418-9680-e743ccba8702\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.393729 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/39728d8c-03c4-42d3-999d-1dfe014cfb34-secret-volume\") pod \"collect-profiles-29416635-btv7s\" (UID: \"39728d8c-03c4-42d3-999d-1dfe014cfb34\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.393751 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmt24\" (UniqueName: \"kubernetes.io/projected/e9405376-0114-4bee-b245-f17b30f2594a-kube-api-access-fmt24\") pod \"marketplace-operator-79b997595-xptzp\" (UID: \"e9405376-0114-4bee-b245-f17b30f2594a\") " pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.393778 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.394247 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb964fa0-1524-42ab-b399-8e9a7e7e3543-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-qld9m\" (UID: \"eb964fa0-1524-42ab-b399-8e9a7e7e3543\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qld9m" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.394521 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cbcef7ec-a2f0-4363-93e6-772d6d35d571-installation-pull-secrets\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.394869 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6fdda9bf-9941-4f82-958f-22657d41aa74-service-ca-bundle\") pod \"authentication-operator-69f744f599-n6bpb\" (UID: \"6fdda9bf-9941-4f82-958f-22657d41aa74\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.395419 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cbcef7ec-a2f0-4363-93e6-772d6d35d571-registry-tls\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.395675 
4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f29acc2-2357-4418-9680-e743ccba8702-trusted-ca\") pod \"ingress-operator-5b745b69d9-65j9k\" (UID: \"6f29acc2-2357-4418-9680-e743ccba8702\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.396744 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6b76376c-f080-4458-a87a-84eab1e4b86d-client-ca\") pod \"route-controller-manager-6576b87f9c-jslph\" (UID: \"6b76376c-f080-4458-a87a-84eab1e4b86d\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.396972 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6fdda9bf-9941-4f82-958f-22657d41aa74-config\") pod \"authentication-operator-69f744f599-n6bpb\" (UID: \"6fdda9bf-9941-4f82-958f-22657d41aa74\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" Dec 06 05:22:27 crc kubenswrapper[4706]: E1206 05:22:27.397020 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:27.896996789 +0000 UTC m=+170.224820733 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.397610 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/f05088c1-1548-4c56-8e14-3610540dec5c-etcd-client\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.399218 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.399605 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b76376c-f080-4458-a87a-84eab1e4b86d-config\") pod \"route-controller-manager-6576b87f9c-jslph\" (UID: \"6b76376c-f080-4458-a87a-84eab1e4b86d\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.400418 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/4edab72e-ed84-4e90-86da-02b3d3aa33bf-proxy-tls\") pod \"machine-config-operator-74547568cd-wr2bq\" (UID: \"4edab72e-ed84-4e90-86da-02b3d3aa33bf\") 
" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.400903 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f05088c1-1548-4c56-8e14-3610540dec5c-serving-cert\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.402017 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/57c94140-5c17-4423-82aa-e62f070fa68c-machine-approver-tls\") pod \"machine-approver-56656f9798-9sxtr\" (UID: \"57c94140-5c17-4423-82aa-e62f070fa68c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9sxtr" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.403722 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/019227a7-15fd-4c90-8807-f5aef16b2b10-serving-cert\") pod \"etcd-operator-b45778765-xpmpp\" (UID: \"019227a7-15fd-4c90-8807-f5aef16b2b10\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.405188 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/de9628dc-df47-4a48-898b-f85d33e59452-default-certificate\") pod \"router-default-5444994796-l9d42\" (UID: \"de9628dc-df47-4a48-898b-f85d33e59452\") " pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.407014 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-5n589"] Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.407169 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-trusted-ca-bundle\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.409521 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cbcef7ec-a2f0-4363-93e6-772d6d35d571-bound-sa-token\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.428278 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pb86p\" (UniqueName: \"kubernetes.io/projected/f5a20850-1d32-4041-881c-098e06ecd4f8-kube-api-access-pb86p\") pod \"kube-storage-version-migrator-operator-b67b599dd-5tbsf\" (UID: \"f5a20850-1d32-4041-881c-098e06ecd4f8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5tbsf" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.445294 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vd5m\" (UniqueName: \"kubernetes.io/projected/eba39d45-3292-48d5-be72-9f948b5ff2fe-kube-api-access-9vd5m\") pod \"dns-operator-744455d44c-jhgqt\" (UID: \"eba39d45-3292-48d5-be72-9f948b5ff2fe\") " 
pod="openshift-dns-operator/dns-operator-744455d44c-jhgqt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.469531 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwclk\" (UniqueName: \"kubernetes.io/projected/4edab72e-ed84-4e90-86da-02b3d3aa33bf-kube-api-access-wwclk\") pod \"machine-config-operator-74547568cd-wr2bq\" (UID: \"4edab72e-ed84-4e90-86da-02b3d3aa33bf\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.495227 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hjnjp\" (UniqueName: \"kubernetes.io/projected/f05088c1-1548-4c56-8e14-3610540dec5c-kube-api-access-hjnjp\") pod \"apiserver-7bbb656c7d-h55gt\" (UID: \"f05088c1-1548-4c56-8e14-3610540dec5c\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.495714 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.495948 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stxqp\" (UniqueName: \"kubernetes.io/projected/43dddc60-c6c0-48bb-9888-6cfb66efd812-kube-api-access-stxqp\") pod \"service-ca-9c57cc56f-9gzzw\" (UID: \"43dddc60-c6c0-48bb-9888-6cfb66efd812\") " pod="openshift-service-ca/service-ca-9c57cc56f-9gzzw" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.495981 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5b62\" (UniqueName: \"kubernetes.io/projected/971d28d8-7a3b-4af0-a3e3-9ee9468dbca5-kube-api-access-v5b62\") pod \"dns-default-jk2pn\" (UID: \"971d28d8-7a3b-4af0-a3e3-9ee9468dbca5\") " pod="openshift-dns/dns-default-jk2pn" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.496002 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s4b75\" (UniqueName: \"kubernetes.io/projected/3a68cbce-a0d0-4128-b5fc-ba2664947314-kube-api-access-s4b75\") pod \"csi-hostpathplugin-4gjwk\" (UID: \"3a68cbce-a0d0-4128-b5fc-ba2664947314\") " pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.496023 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/23fe0ae4-ae9d-4470-871f-fb431c6c6c80-certs\") pod \"machine-config-server-gm4d4\" (UID: \"23fe0ae4-ae9d-4470-871f-fb431c6c6c80\") " pod="openshift-machine-config-operator/machine-config-server-gm4d4" Dec 06 05:22:27 crc kubenswrapper[4706]: E1206 05:22:27.496055 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:27.996021131 +0000 UTC m=+170.323845075 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.496084 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/55fbff71-b86d-4b25-9593-b48effb4fb7f-cert\") pod \"ingress-canary-bx4tb\" (UID: \"55fbff71-b86d-4b25-9593-b48effb4fb7f\") " pod="openshift-ingress-canary/ingress-canary-bx4tb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.496117 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4f34c9f6-5366-412e-a8b7-93837b5ea428-webhook-cert\") pod \"packageserver-d55dfcdfc-mkj59\" (UID: \"4f34c9f6-5366-412e-a8b7-93837b5ea428\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.496548 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnxxl\" (UniqueName: \"kubernetes.io/projected/11e93d60-f22e-4b73-b41c-72a9b55e4ff5-kube-api-access-cnxxl\") pod \"service-ca-operator-777779d784-kcwkb\" (UID: \"11e93d60-f22e-4b73-b41c-72a9b55e4ff5\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-kcwkb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.496599 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/23fe0ae4-ae9d-4470-871f-fb431c6c6c80-node-bootstrap-token\") pod \"machine-config-server-gm4d4\" (UID: \"23fe0ae4-ae9d-4470-871f-fb431c6c6c80\") " pod="openshift-machine-config-operator/machine-config-server-gm4d4" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.496621 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/3a68cbce-a0d0-4128-b5fc-ba2664947314-socket-dir\") pod \"csi-hostpathplugin-4gjwk\" (UID: \"3a68cbce-a0d0-4128-b5fc-ba2664947314\") " pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.496638 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/43dddc60-c6c0-48bb-9888-6cfb66efd812-signing-key\") pod \"service-ca-9c57cc56f-9gzzw\" (UID: \"43dddc60-c6c0-48bb-9888-6cfb66efd812\") " pod="openshift-service-ca/service-ca-9c57cc56f-9gzzw" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.496911 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/bd187a9c-688a-463f-a84a-6fb7c1df0360-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-4lmqq\" (UID: \"bd187a9c-688a-463f-a84a-6fb7c1df0360\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4lmqq" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.496931 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zt7tm\" (UniqueName: 
\"kubernetes.io/projected/39728d8c-03c4-42d3-999d-1dfe014cfb34-kube-api-access-zt7tm\") pod \"collect-profiles-29416635-btv7s\" (UID: \"39728d8c-03c4-42d3-999d-1dfe014cfb34\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.496931 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/3a68cbce-a0d0-4128-b5fc-ba2664947314-socket-dir\") pod \"csi-hostpathplugin-4gjwk\" (UID: \"3a68cbce-a0d0-4128-b5fc-ba2664947314\") " pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.496951 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a2b02aaa-3dd3-462e-9dd6-c69748bc8511-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-vfwnb\" (UID: \"a2b02aaa-3dd3-462e-9dd6-c69748bc8511\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vfwnb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.496971 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/11e93d60-f22e-4b73-b41c-72a9b55e4ff5-serving-cert\") pod \"service-ca-operator-777779d784-kcwkb\" (UID: \"11e93d60-f22e-4b73-b41c-72a9b55e4ff5\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-kcwkb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.496992 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/3a68cbce-a0d0-4128-b5fc-ba2664947314-csi-data-dir\") pod \"csi-hostpathplugin-4gjwk\" (UID: \"3a68cbce-a0d0-4128-b5fc-ba2664947314\") " pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497027 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e50331c5-c197-47f6-a27d-abd4fd31410f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jgqp9\" (UID: \"e50331c5-c197-47f6-a27d-abd4fd31410f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jgqp9" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497080 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497124 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/3a68cbce-a0d0-4128-b5fc-ba2664947314-plugins-dir\") pod \"csi-hostpathplugin-4gjwk\" (UID: \"3a68cbce-a0d0-4128-b5fc-ba2664947314\") " pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497139 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jfck\" (UniqueName: \"kubernetes.io/projected/4f34c9f6-5366-412e-a8b7-93837b5ea428-kube-api-access-6jfck\") pod \"packageserver-d55dfcdfc-mkj59\" (UID: \"4f34c9f6-5366-412e-a8b7-93837b5ea428\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497162 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/971d28d8-7a3b-4af0-a3e3-9ee9468dbca5-metrics-tls\") pod \"dns-default-jk2pn\" (UID: \"971d28d8-7a3b-4af0-a3e3-9ee9468dbca5\") " pod="openshift-dns/dns-default-jk2pn" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497209 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssgbq\" (UniqueName: \"kubernetes.io/projected/190e4233-a97e-4af7-8e7e-d66ccf827546-kube-api-access-ssgbq\") pod \"downloads-7954f5f757-6xms4\" (UID: \"190e4233-a97e-4af7-8e7e-d66ccf827546\") " pod="openshift-console/downloads-7954f5f757-6xms4" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497233 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e50331c5-c197-47f6-a27d-abd4fd31410f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jgqp9\" (UID: \"e50331c5-c197-47f6-a27d-abd4fd31410f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jgqp9" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497260 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e9405376-0114-4bee-b245-f17b30f2594a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-xptzp\" (UID: \"e9405376-0114-4bee-b245-f17b30f2594a\") " pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497283 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/39728d8c-03c4-42d3-999d-1dfe014cfb34-secret-volume\") pod \"collect-profiles-29416635-btv7s\" (UID: \"39728d8c-03c4-42d3-999d-1dfe014cfb34\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497315 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fmt24\" (UniqueName: \"kubernetes.io/projected/e9405376-0114-4bee-b245-f17b30f2594a-kube-api-access-fmt24\") pod \"marketplace-operator-79b997595-xptzp\" (UID: \"e9405376-0114-4bee-b245-f17b30f2594a\") " pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497337 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e9405376-0114-4bee-b245-f17b30f2594a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-xptzp\" (UID: \"e9405376-0114-4bee-b245-f17b30f2594a\") " pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497354 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmvf7\" (UniqueName: \"kubernetes.io/projected/23fe0ae4-ae9d-4470-871f-fb431c6c6c80-kube-api-access-dmvf7\") pod \"machine-config-server-gm4d4\" (UID: \"23fe0ae4-ae9d-4470-871f-fb431c6c6c80\") " pod="openshift-machine-config-operator/machine-config-server-gm4d4" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497391 4706 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a417f08a-e64f-4a02-abb3-bee2049eb2e7-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-wdsds\" (UID: \"a417f08a-e64f-4a02-abb3-bee2049eb2e7\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wdsds" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497411 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/3a68cbce-a0d0-4128-b5fc-ba2664947314-mountpoint-dir\") pod \"csi-hostpathplugin-4gjwk\" (UID: \"3a68cbce-a0d0-4128-b5fc-ba2664947314\") " pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497430 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/52ee1c0b-d021-43e3-a982-268e0af6f331-srv-cert\") pod \"olm-operator-6b444d44fb-pzbsr\" (UID: \"52ee1c0b-d021-43e3-a982-268e0af6f331\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-pzbsr" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497556 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e50331c5-c197-47f6-a27d-abd4fd31410f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jgqp9\" (UID: \"e50331c5-c197-47f6-a27d-abd4fd31410f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jgqp9" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497572 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4f34c9f6-5366-412e-a8b7-93837b5ea428-apiservice-cert\") pod \"packageserver-d55dfcdfc-mkj59\" (UID: \"4f34c9f6-5366-412e-a8b7-93837b5ea428\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497594 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/11e93d60-f22e-4b73-b41c-72a9b55e4ff5-config\") pod \"service-ca-operator-777779d784-kcwkb\" (UID: \"11e93d60-f22e-4b73-b41c-72a9b55e4ff5\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-kcwkb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497611 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8578q\" (UniqueName: \"kubernetes.io/projected/a2b02aaa-3dd3-462e-9dd6-c69748bc8511-kube-api-access-8578q\") pod \"machine-config-controller-84d6567774-vfwnb\" (UID: \"a2b02aaa-3dd3-462e-9dd6-c69748bc8511\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vfwnb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497633 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sddld\" (UniqueName: \"kubernetes.io/projected/bd187a9c-688a-463f-a84a-6fb7c1df0360-kube-api-access-sddld\") pod \"package-server-manager-789f6589d5-4lmqq\" (UID: \"bd187a9c-688a-463f-a84a-6fb7c1df0360\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4lmqq" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497667 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nt7mf\" 
(UniqueName: \"kubernetes.io/projected/cba73644-0f32-4d53-9c68-e98d52909f9a-kube-api-access-nt7mf\") pod \"migrator-59844c95c7-25hf6\" (UID: \"cba73644-0f32-4d53-9c68-e98d52909f9a\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-25hf6" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497687 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/52ee1c0b-d021-43e3-a982-268e0af6f331-profile-collector-cert\") pod \"olm-operator-6b444d44fb-pzbsr\" (UID: \"52ee1c0b-d021-43e3-a982-268e0af6f331\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-pzbsr" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497703 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jm2q\" (UniqueName: \"kubernetes.io/projected/28ae28d5-433c-4ce7-bb6e-2532d65b354d-kube-api-access-2jm2q\") pod \"multus-admission-controller-857f4d67dd-qwdgg\" (UID: \"28ae28d5-433c-4ce7-bb6e-2532d65b354d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qwdgg" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497720 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96rlc\" (UniqueName: \"kubernetes.io/projected/a417f08a-e64f-4a02-abb3-bee2049eb2e7-kube-api-access-96rlc\") pod \"control-plane-machine-set-operator-78cbb6b69f-wdsds\" (UID: \"a417f08a-e64f-4a02-abb3-bee2049eb2e7\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wdsds" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497735 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/28ae28d5-433c-4ce7-bb6e-2532d65b354d-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-qwdgg\" (UID: \"28ae28d5-433c-4ce7-bb6e-2532d65b354d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qwdgg" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497754 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a2b02aaa-3dd3-462e-9dd6-c69748bc8511-proxy-tls\") pod \"machine-config-controller-84d6567774-vfwnb\" (UID: \"a2b02aaa-3dd3-462e-9dd6-c69748bc8511\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vfwnb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497771 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/39728d8c-03c4-42d3-999d-1dfe014cfb34-config-volume\") pod \"collect-profiles-29416635-btv7s\" (UID: \"39728d8c-03c4-42d3-999d-1dfe014cfb34\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497786 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/43dddc60-c6c0-48bb-9888-6cfb66efd812-signing-cabundle\") pod \"service-ca-9c57cc56f-9gzzw\" (UID: \"43dddc60-c6c0-48bb-9888-6cfb66efd812\") " pod="openshift-service-ca/service-ca-9c57cc56f-9gzzw" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497800 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/4f34c9f6-5366-412e-a8b7-93837b5ea428-tmpfs\") pod \"packageserver-d55dfcdfc-mkj59\" (UID: 
\"4f34c9f6-5366-412e-a8b7-93837b5ea428\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497827 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcsn6\" (UniqueName: \"kubernetes.io/projected/55fbff71-b86d-4b25-9593-b48effb4fb7f-kube-api-access-jcsn6\") pod \"ingress-canary-bx4tb\" (UID: \"55fbff71-b86d-4b25-9593-b48effb4fb7f\") " pod="openshift-ingress-canary/ingress-canary-bx4tb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497841 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/3a68cbce-a0d0-4128-b5fc-ba2664947314-registration-dir\") pod \"csi-hostpathplugin-4gjwk\" (UID: \"3a68cbce-a0d0-4128-b5fc-ba2664947314\") " pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497859 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqdvl\" (UniqueName: \"kubernetes.io/projected/52ee1c0b-d021-43e3-a982-268e0af6f331-kube-api-access-kqdvl\") pod \"olm-operator-6b444d44fb-pzbsr\" (UID: \"52ee1c0b-d021-43e3-a982-268e0af6f331\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-pzbsr" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.497874 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/971d28d8-7a3b-4af0-a3e3-9ee9468dbca5-config-volume\") pod \"dns-default-jk2pn\" (UID: \"971d28d8-7a3b-4af0-a3e3-9ee9468dbca5\") " pod="openshift-dns/dns-default-jk2pn" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.498451 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/971d28d8-7a3b-4af0-a3e3-9ee9468dbca5-config-volume\") pod \"dns-default-jk2pn\" (UID: \"971d28d8-7a3b-4af0-a3e3-9ee9468dbca5\") " pod="openshift-dns/dns-default-jk2pn" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.499155 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/23fe0ae4-ae9d-4470-871f-fb431c6c6c80-certs\") pod \"machine-config-server-gm4d4\" (UID: \"23fe0ae4-ae9d-4470-871f-fb431c6c6c80\") " pod="openshift-machine-config-operator/machine-config-server-gm4d4" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.499498 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/bd187a9c-688a-463f-a84a-6fb7c1df0360-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-4lmqq\" (UID: \"bd187a9c-688a-463f-a84a-6fb7c1df0360\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4lmqq" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.501019 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/11e93d60-f22e-4b73-b41c-72a9b55e4ff5-config\") pod \"service-ca-operator-777779d784-kcwkb\" (UID: \"11e93d60-f22e-4b73-b41c-72a9b55e4ff5\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-kcwkb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.501198 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: 
\"kubernetes.io/secret/52ee1c0b-d021-43e3-a982-268e0af6f331-srv-cert\") pod \"olm-operator-6b444d44fb-pzbsr\" (UID: \"52ee1c0b-d021-43e3-a982-268e0af6f331\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-pzbsr" Dec 06 05:22:27 crc kubenswrapper[4706]: E1206 05:22:27.501841 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:28.001826797 +0000 UTC m=+170.329650741 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.502329 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/43dddc60-c6c0-48bb-9888-6cfb66efd812-signing-key\") pod \"service-ca-9c57cc56f-9gzzw\" (UID: \"43dddc60-c6c0-48bb-9888-6cfb66efd812\") " pod="openshift-service-ca/service-ca-9c57cc56f-9gzzw" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.502577 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/3a68cbce-a0d0-4128-b5fc-ba2664947314-plugins-dir\") pod \"csi-hostpathplugin-4gjwk\" (UID: \"3a68cbce-a0d0-4128-b5fc-ba2664947314\") " pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.502888 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/23fe0ae4-ae9d-4470-871f-fb431c6c6c80-node-bootstrap-token\") pod \"machine-config-server-gm4d4\" (UID: \"23fe0ae4-ae9d-4470-871f-fb431c6c6c80\") " pod="openshift-machine-config-operator/machine-config-server-gm4d4" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.503240 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/3a68cbce-a0d0-4128-b5fc-ba2664947314-csi-data-dir\") pod \"csi-hostpathplugin-4gjwk\" (UID: \"3a68cbce-a0d0-4128-b5fc-ba2664947314\") " pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.503532 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/55fbff71-b86d-4b25-9593-b48effb4fb7f-cert\") pod \"ingress-canary-bx4tb\" (UID: \"55fbff71-b86d-4b25-9593-b48effb4fb7f\") " pod="openshift-ingress-canary/ingress-canary-bx4tb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.503611 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e50331c5-c197-47f6-a27d-abd4fd31410f-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jgqp9\" (UID: \"e50331c5-c197-47f6-a27d-abd4fd31410f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jgqp9" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.503742 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/e50331c5-c197-47f6-a27d-abd4fd31410f-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jgqp9\" (UID: \"e50331c5-c197-47f6-a27d-abd4fd31410f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jgqp9" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.504160 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/43dddc60-c6c0-48bb-9888-6cfb66efd812-signing-cabundle\") pod \"service-ca-9c57cc56f-9gzzw\" (UID: \"43dddc60-c6c0-48bb-9888-6cfb66efd812\") " pod="openshift-service-ca/service-ca-9c57cc56f-9gzzw" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.504320 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7sdt5\" (UniqueName: \"kubernetes.io/projected/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-kube-api-access-7sdt5\") pod \"oauth-openshift-558db77b4-lzm5j\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.504464 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/3a68cbce-a0d0-4128-b5fc-ba2664947314-registration-dir\") pod \"csi-hostpathplugin-4gjwk\" (UID: \"3a68cbce-a0d0-4128-b5fc-ba2664947314\") " pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.504784 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/39728d8c-03c4-42d3-999d-1dfe014cfb34-config-volume\") pod \"collect-profiles-29416635-btv7s\" (UID: \"39728d8c-03c4-42d3-999d-1dfe014cfb34\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.504884 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e9405376-0114-4bee-b245-f17b30f2594a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-xptzp\" (UID: \"e9405376-0114-4bee-b245-f17b30f2594a\") " pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.505134 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/3a68cbce-a0d0-4128-b5fc-ba2664947314-mountpoint-dir\") pod \"csi-hostpathplugin-4gjwk\" (UID: \"3a68cbce-a0d0-4128-b5fc-ba2664947314\") " pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.505647 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/971d28d8-7a3b-4af0-a3e3-9ee9468dbca5-metrics-tls\") pod \"dns-default-jk2pn\" (UID: \"971d28d8-7a3b-4af0-a3e3-9ee9468dbca5\") " pod="openshift-dns/dns-default-jk2pn" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.505950 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a2b02aaa-3dd3-462e-9dd6-c69748bc8511-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-vfwnb\" (UID: \"a2b02aaa-3dd3-462e-9dd6-c69748bc8511\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vfwnb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 
05:22:27.506004 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/11e93d60-f22e-4b73-b41c-72a9b55e4ff5-serving-cert\") pod \"service-ca-operator-777779d784-kcwkb\" (UID: \"11e93d60-f22e-4b73-b41c-72a9b55e4ff5\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-kcwkb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.506325 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/52ee1c0b-d021-43e3-a982-268e0af6f331-profile-collector-cert\") pod \"olm-operator-6b444d44fb-pzbsr\" (UID: \"52ee1c0b-d021-43e3-a982-268e0af6f331\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-pzbsr" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.507493 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a2b02aaa-3dd3-462e-9dd6-c69748bc8511-proxy-tls\") pod \"machine-config-controller-84d6567774-vfwnb\" (UID: \"a2b02aaa-3dd3-462e-9dd6-c69748bc8511\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vfwnb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.507763 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a417f08a-e64f-4a02-abb3-bee2049eb2e7-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-wdsds\" (UID: \"a417f08a-e64f-4a02-abb3-bee2049eb2e7\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wdsds" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.508259 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/39728d8c-03c4-42d3-999d-1dfe014cfb34-secret-volume\") pod \"collect-profiles-29416635-btv7s\" (UID: \"39728d8c-03c4-42d3-999d-1dfe014cfb34\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.508983 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/28ae28d5-433c-4ce7-bb6e-2532d65b354d-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-qwdgg\" (UID: \"28ae28d5-433c-4ce7-bb6e-2532d65b354d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qwdgg" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.524153 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpbxs\" (UniqueName: \"kubernetes.io/projected/25f03df2-7ec0-403c-8b72-1933efc742f5-kube-api-access-vpbxs\") pod \"openshift-controller-manager-operator-756b6f6bc6-xnr66\" (UID: \"25f03df2-7ec0-403c-8b72-1933efc742f5\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xnr66" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.528002 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.542353 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhs5q\" (UniqueName: \"kubernetes.io/projected/019227a7-15fd-4c90-8807-f5aef16b2b10-kube-api-access-xhs5q\") pod \"etcd-operator-b45778765-xpmpp\" (UID: \"019227a7-15fd-4c90-8807-f5aef16b2b10\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.547511 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-jhgqt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.560453 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dw22m\" (UniqueName: \"kubernetes.io/projected/de9628dc-df47-4a48-898b-f85d33e59452-kube-api-access-dw22m\") pod \"router-default-5444994796-l9d42\" (UID: \"de9628dc-df47-4a48-898b-f85d33e59452\") " pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.582601 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xnr66" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.584980 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jns2h\" (UniqueName: \"kubernetes.io/projected/67f8479e-b919-4de1-8357-2fd41bf205a6-kube-api-access-jns2h\") pod \"catalog-operator-68c6474976-hggv7\" (UID: \"67f8479e-b919-4de1-8357-2fd41bf205a6\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hggv7" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.585967 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-zvsj5"] Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.592955 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.599134 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9"] Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.599213 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:27 crc kubenswrapper[4706]: E1206 05:22:27.599237 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:28.099196255 +0000 UTC m=+170.427020199 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.599557 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: E1206 05:22:27.600191 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:28.100175342 +0000 UTC m=+170.427999276 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.603252 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nlj9g\" (UniqueName: \"kubernetes.io/projected/ed24741b-5476-4f20-bd17-4c8686d40419-kube-api-access-nlj9g\") pod \"console-f9d7485db-t4xd8\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.622487 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4h2rs\" (UniqueName: \"kubernetes.io/projected/6f29acc2-2357-4418-9680-e743ccba8702-kube-api-access-4h2rs\") pod \"ingress-operator-5b745b69d9-65j9k\" (UID: \"6f29acc2-2357-4418-9680-e743ccba8702\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.629135 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5tbsf" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.634589 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sj74z"] Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.639997 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.641603 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bxpjq"] Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.642131 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jmlj\" (UniqueName: \"kubernetes.io/projected/cbcef7ec-a2f0-4363-93e6-772d6d35d571-kube-api-access-9jmlj\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.646381 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.652590 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hggv7" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.661921 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sghmn\" (UniqueName: \"kubernetes.io/projected/33133042-30b9-487e-8ee4-097e0faf7673-kube-api-access-sghmn\") pod \"cluster-samples-operator-665b6dd947-db62k\" (UID: \"33133042-30b9-487e-8ee4-097e0faf7673\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-db62k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.682074 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-djjxn\" (UniqueName: \"kubernetes.io/projected/57c94140-5c17-4423-82aa-e62f070fa68c-kube-api-access-djjxn\") pod \"machine-approver-56656f9798-9sxtr\" (UID: \"57c94140-5c17-4423-82aa-e62f070fa68c\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9sxtr" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.685638 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.700587 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:27 crc kubenswrapper[4706]: E1206 05:22:27.700833 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:28.200785525 +0000 UTC m=+170.528609509 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.701321 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: E1206 05:22:27.701882 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:28.201865995 +0000 UTC m=+170.529689969 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.742842 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s4b75\" (UniqueName: \"kubernetes.io/projected/3a68cbce-a0d0-4128-b5fc-ba2664947314-kube-api-access-s4b75\") pod \"csi-hostpathplugin-4gjwk\" (UID: \"3a68cbce-a0d0-4128-b5fc-ba2664947314\") " pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.749535 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9sxtr" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.759394 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.774539 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stxqp\" (UniqueName: \"kubernetes.io/projected/43dddc60-c6c0-48bb-9888-6cfb66efd812-kube-api-access-stxqp\") pod \"service-ca-9c57cc56f-9gzzw\" (UID: \"43dddc60-c6c0-48bb-9888-6cfb66efd812\") " pod="openshift-service-ca/service-ca-9c57cc56f-9gzzw" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.774843 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.792293 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnxxl\" (UniqueName: \"kubernetes.io/projected/11e93d60-f22e-4b73-b41c-72a9b55e4ff5-kube-api-access-cnxxl\") pod \"service-ca-operator-777779d784-kcwkb\" (UID: \"11e93d60-f22e-4b73-b41c-72a9b55e4ff5\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-kcwkb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.803089 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:27 crc kubenswrapper[4706]: E1206 05:22:27.803309 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:28.303273141 +0000 UTC m=+170.631097075 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.803669 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: E1206 05:22:27.804106 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:28.304096603 +0000 UTC m=+170.631920547 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.804863 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zt7tm\" (UniqueName: \"kubernetes.io/projected/39728d8c-03c4-42d3-999d-1dfe014cfb34-kube-api-access-zt7tm\") pod \"collect-profiles-29416635-btv7s\" (UID: \"39728d8c-03c4-42d3-999d-1dfe014cfb34\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.824109 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5b62\" (UniqueName: \"kubernetes.io/projected/971d28d8-7a3b-4af0-a3e3-9ee9468dbca5-kube-api-access-v5b62\") pod \"dns-default-jk2pn\" (UID: \"971d28d8-7a3b-4af0-a3e3-9ee9468dbca5\") " pod="openshift-dns/dns-default-jk2pn" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.841235 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb964fa0-1524-42ab-b399-8e9a7e7e3543-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-qld9m\" (UID: \"eb964fa0-1524-42ab-b399-8e9a7e7e3543\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qld9m" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.841755 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/4f34c9f6-5366-412e-a8b7-93837b5ea428-webhook-cert\") pod \"packageserver-d55dfcdfc-mkj59\" (UID: \"4f34c9f6-5366-412e-a8b7-93837b5ea428\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.842115 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/4f34c9f6-5366-412e-a8b7-93837b5ea428-apiservice-cert\") pod \"packageserver-d55dfcdfc-mkj59\" (UID: \"4f34c9f6-5366-412e-a8b7-93837b5ea428\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.842364 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/4f34c9f6-5366-412e-a8b7-93837b5ea428-tmpfs\") pod \"packageserver-d55dfcdfc-mkj59\" (UID: \"4f34c9f6-5366-412e-a8b7-93837b5ea428\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.846158 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96rlc\" (UniqueName: \"kubernetes.io/projected/a417f08a-e64f-4a02-abb3-bee2049eb2e7-kube-api-access-96rlc\") pod \"control-plane-machine-set-operator-78cbb6b69f-wdsds\" (UID: \"a417f08a-e64f-4a02-abb3-bee2049eb2e7\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wdsds" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.846774 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/e9405376-0114-4bee-b245-f17b30f2594a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-xptzp\" (UID: \"e9405376-0114-4bee-b245-f17b30f2594a\") " pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.850860 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8g4z5\" (UniqueName: \"kubernetes.io/projected/6fdda9bf-9941-4f82-958f-22657d41aa74-kube-api-access-8g4z5\") pod \"authentication-operator-69f744f599-n6bpb\" (UID: \"6fdda9bf-9941-4f82-958f-22657d41aa74\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.854149 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.877372 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e50331c5-c197-47f6-a27d-abd4fd31410f-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jgqp9\" (UID: \"e50331c5-c197-47f6-a27d-abd4fd31410f\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jgqp9" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.886736 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-db62k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.896820 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sddld\" (UniqueName: \"kubernetes.io/projected/bd187a9c-688a-463f-a84a-6fb7c1df0360-kube-api-access-sddld\") pod \"package-server-manager-789f6589d5-4lmqq\" (UID: \"bd187a9c-688a-463f-a84a-6fb7c1df0360\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4lmqq" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.905099 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8578q\" (UniqueName: \"kubernetes.io/projected/a2b02aaa-3dd3-462e-9dd6-c69748bc8511-kube-api-access-8578q\") pod \"machine-config-controller-84d6567774-vfwnb\" (UID: \"a2b02aaa-3dd3-462e-9dd6-c69748bc8511\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vfwnb" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.905739 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:27 crc kubenswrapper[4706]: E1206 05:22:27.906164 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:28.406035194 +0000 UTC m=+170.733859178 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.906300 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:27 crc kubenswrapper[4706]: E1206 05:22:27.907196 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:28.407179275 +0000 UTC m=+170.735003269 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.915704 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.922662 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qld9m" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.931033 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssgbq\" (UniqueName: \"kubernetes.io/projected/190e4233-a97e-4af7-8e7e-d66ccf827546-kube-api-access-ssgbq\") pod \"downloads-7954f5f757-6xms4\" (UID: \"190e4233-a97e-4af7-8e7e-d66ccf827546\") " pod="openshift-console/downloads-7954f5f757-6xms4" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.967406 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wdsds" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.974726 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jfck\" (UniqueName: \"kubernetes.io/projected/4f34c9f6-5366-412e-a8b7-93837b5ea428-kube-api-access-6jfck\") pod \"packageserver-d55dfcdfc-mkj59\" (UID: \"4f34c9f6-5366-412e-a8b7-93837b5ea428\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.979485 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.979836 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nt7mf\" (UniqueName: \"kubernetes.io/projected/cba73644-0f32-4d53-9c68-e98d52909f9a-kube-api-access-nt7mf\") pod \"migrator-59844c95c7-25hf6\" (UID: \"cba73644-0f32-4d53-9c68-e98d52909f9a\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-25hf6" Dec 06 05:22:27 crc kubenswrapper[4706]: I1206 05:22:27.995522 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jm2q\" (UniqueName: \"kubernetes.io/projected/28ae28d5-433c-4ce7-bb6e-2532d65b354d-kube-api-access-2jm2q\") pod \"multus-admission-controller-857f4d67dd-qwdgg\" (UID: \"28ae28d5-433c-4ce7-bb6e-2532d65b354d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-qwdgg" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.003904 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-9gzzw" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.008318 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:28 crc kubenswrapper[4706]: E1206 05:22:28.008666 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:28.508616861 +0000 UTC m=+170.836440835 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.008994 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:28 crc kubenswrapper[4706]: E1206 05:22:28.009478 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:28.509460615 +0000 UTC m=+170.837284579 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.011808 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4lmqq" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.016231 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jgqp9" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.025402 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-kcwkb" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.033670 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dmvf7\" (UniqueName: \"kubernetes.io/projected/23fe0ae4-ae9d-4470-871f-fb431c6c6c80-kube-api-access-dmvf7\") pod \"machine-config-server-gm4d4\" (UID: \"23fe0ae4-ae9d-4470-871f-fb431c6c6c80\") " pod="openshift-machine-config-operator/machine-config-server-gm4d4" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.033783 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcsn6\" (UniqueName: \"kubernetes.io/projected/55fbff71-b86d-4b25-9593-b48effb4fb7f-kube-api-access-jcsn6\") pod \"ingress-canary-bx4tb\" (UID: \"55fbff71-b86d-4b25-9593-b48effb4fb7f\") " pod="openshift-ingress-canary/ingress-canary-bx4tb" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.038450 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.043177 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.043686 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-6xms4" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.051678 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqdvl\" (UniqueName: \"kubernetes.io/projected/52ee1c0b-d021-43e3-a982-268e0af6f331-kube-api-access-kqdvl\") pod \"olm-operator-6b444d44fb-pzbsr\" (UID: \"52ee1c0b-d021-43e3-a982-268e0af6f331\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-pzbsr" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.052140 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vfwnb" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.058638 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-jk2pn" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.071385 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fmt24\" (UniqueName: \"kubernetes.io/projected/e9405376-0114-4bee-b245-f17b30f2594a-kube-api-access-fmt24\") pod \"marketplace-operator-79b997595-xptzp\" (UID: \"e9405376-0114-4bee-b245-f17b30f2594a\") " pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.074071 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sj74z" event={"ID":"a68f594e-b151-4902-9792-b5d6051525dd","Type":"ContainerStarted","Data":"773a1199e975804196db9976f76631852e435153ea3f4f7175d184efb4a6caec"} Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.082835 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-gm4d4" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.109642 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:28 crc kubenswrapper[4706]: E1206 05:22:28.109847 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:28.609814802 +0000 UTC m=+170.937638746 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.110511 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:28 crc kubenswrapper[4706]: E1206 05:22:28.111500 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:28.611483487 +0000 UTC m=+170.939307431 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.210616 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xnr66"] Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.212479 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:28 crc kubenswrapper[4706]: E1206 05:22:28.212703 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:28.712666777 +0000 UTC m=+171.040490721 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.212882 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:28 crc kubenswrapper[4706]: E1206 05:22:28.213351 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:28.713339985 +0000 UTC m=+171.041163929 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.260962 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-qwdgg" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.272835 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-25hf6" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.273469 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-4gjwk"] Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.287530 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-pzbsr" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.296162 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.314543 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:28 crc kubenswrapper[4706]: E1206 05:22:28.314777 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:28.814744862 +0000 UTC m=+171.142568806 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.314865 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:28 crc kubenswrapper[4706]: E1206 05:22:28.315262 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:28.815254376 +0000 UTC m=+171.143078310 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.331456 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-bx4tb" Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.378029 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-zvsj5" event={"ID":"76a85e06-bb22-4260-8a17-639478f9b3ca","Type":"ContainerStarted","Data":"2678833a5a32d92a12c16a4db8b243d7fdb75b07ba78fd8e052006e37ba333f2"} Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.401499 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-5n589" event={"ID":"58a1210d-91bd-4a47-b70a-c8026a238565","Type":"ContainerStarted","Data":"c6af562e163cb7e57ac13f59d4d8fce6569d998c7bdd60f2ecd854ed387eb2b0"} Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.404232 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bxpjq" event={"ID":"152ec06e-1c86-4db5-87a5-a96da88e008e","Type":"ContainerStarted","Data":"0a53819f1a05ff94b8be9e28baa613de41f116d993e96f854d7f5b04f2589117"} Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.410440 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9" event={"ID":"d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1","Type":"ContainerStarted","Data":"9ab29d8888d6f77a3f838abf0b255427c0c870417829b449b1c24cf595f06bfa"} Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.416537 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:28 crc kubenswrapper[4706]: E1206 05:22:28.417095 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:28.917075593 +0000 UTC m=+171.244899527 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.469558 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-t4xd8"] Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.476984 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-xpmpp"] Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.517649 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:28 crc kubenswrapper[4706]: E1206 05:22:28.518060 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:29.018029917 +0000 UTC m=+171.345853861 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.621155 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:28 crc kubenswrapper[4706]: E1206 05:22:28.621526 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:29.121492288 +0000 UTC m=+171.449316232 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.621763 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:28 crc kubenswrapper[4706]: E1206 05:22:28.622138 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:29.122123695 +0000 UTC m=+171.449947639 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.722818 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:28 crc kubenswrapper[4706]: E1206 05:22:28.723210 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:29.223189582 +0000 UTC m=+171.551013526 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.775937 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-jhgqt"] Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.785537 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph"] Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.824136 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:28 crc kubenswrapper[4706]: E1206 05:22:28.824691 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:29.324677091 +0000 UTC m=+171.652501035 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:28 crc kubenswrapper[4706]: W1206 05:22:28.893629 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6b76376c_f080_4458_a87a_84eab1e4b86d.slice/crio-0fa95df1eb46491aad1f938a7804239b8ac2fc4cc48e9663d38a0fc5c43aea18 WatchSource:0}: Error finding container 0fa95df1eb46491aad1f938a7804239b8ac2fc4cc48e9663d38a0fc5c43aea18: Status 404 returned error can't find the container with id 0fa95df1eb46491aad1f938a7804239b8ac2fc4cc48e9663d38a0fc5c43aea18 Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.910373 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-lzm5j"] Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.918761 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hggv7"] Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.924967 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:28 crc kubenswrapper[4706]: E1206 05:22:28.925444 4706 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:29.425419849 +0000 UTC m=+171.753243793 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:28 crc kubenswrapper[4706]: I1206 05:22:28.925526 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:28 crc kubenswrapper[4706]: E1206 05:22:28.925900 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:29.425893222 +0000 UTC m=+171.753717166 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.027223 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:29 crc kubenswrapper[4706]: E1206 05:22:29.032433 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:29.532384685 +0000 UTC m=+171.860208629 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.034176 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt"] Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.043118 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5tbsf"] Dec 06 05:22:29 crc kubenswrapper[4706]: W1206 05:22:29.114285 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod23fe0ae4_ae9d_4470_871f_fb431c6c6c80.slice/crio-ee9ac8dabcb3a62ed040f7aff2bc0bf068c4b940d6ebc49fd4605d856be917fb WatchSource:0}: Error finding container ee9ac8dabcb3a62ed040f7aff2bc0bf068c4b940d6ebc49fd4605d856be917fb: Status 404 returned error can't find the container with id ee9ac8dabcb3a62ed040f7aff2bc0bf068c4b940d6ebc49fd4605d856be917fb Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.134686 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:29 crc kubenswrapper[4706]: E1206 05:22:29.135099 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:29.635080655 +0000 UTC m=+171.962904599 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.160845 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-9gzzw"] Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.235495 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k"] Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.236466 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.236605 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-db62k"] Dec 06 05:22:29 crc kubenswrapper[4706]: E1206 05:22:29.236784 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:29.73676383 +0000 UTC m=+172.064587774 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.259710 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-kcwkb"] Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.291259 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59"] Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.297619 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s"] Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.337857 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wdsds"] Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.338093 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:29 crc kubenswrapper[4706]: E1206 05:22:29.338387 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:29.838371681 +0000 UTC m=+172.166195625 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.340514 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs\") pod \"network-metrics-daemon-4ltjs\" (UID: \"f4065785-c72e-4c45-ab51-ce292be4f2ed\") " pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.359002 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq"] Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.383833 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4lmqq"] Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.389772 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qld9m"] Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.394200 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f4065785-c72e-4c45-ab51-ce292be4f2ed-metrics-certs\") pod \"network-metrics-daemon-4ltjs\" (UID: \"f4065785-c72e-4c45-ab51-ce292be4f2ed\") " pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.408470 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-pzbsr"] Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.439035 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-5g2s4" event={"ID":"4230f0fb-f05e-4ae6-9755-db33865a6c33","Type":"ContainerStarted","Data":"4736ee8481bdd77367c3d3b48f21f26dbe6c822a21ae414b4fe4f8a88ec82025"} Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.444305 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-l9d42" event={"ID":"de9628dc-df47-4a48-898b-f85d33e59452","Type":"ContainerStarted","Data":"e64b8e127184fbb53aa66855523f30a337d86064899cee71df3a0161a3ab9fbf"} Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.451001 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:29 crc kubenswrapper[4706]: E1206 05:22:29.462410 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:29.962373617 +0000 UTC m=+172.290197561 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.503456 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jgqp9"] Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.530278 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-k8g95" event={"ID":"445fbc3d-3a2f-4361-8444-badce4d8e564","Type":"ContainerStarted","Data":"25810a7add87ca617849f740353b79060f445c8bf27a36491e5a3835778706af"} Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.538440 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9sxtr" event={"ID":"57c94140-5c17-4423-82aa-e62f070fa68c","Type":"ContainerStarted","Data":"8cbf26dbb7e40f87703c98150c87f286c2deeee76360fe1ad9b7a62a79e89f5b"} Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.553598 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" event={"ID":"a4df44f2-c01b-47ab-a7df-6b30ea0510a3","Type":"ContainerStarted","Data":"e8460b4a1cffb24f02ff946aaf9aa0ffc47212e2c95d73d31424b7a4ab840973"} Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.554146 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:29 crc kubenswrapper[4706]: E1206 05:22:29.554589 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:30.054563774 +0000 UTC m=+172.382387748 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.557681 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-4ltjs" Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.564434 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hggv7" event={"ID":"67f8479e-b919-4de1-8357-2fd41bf205a6","Type":"ContainerStarted","Data":"b8ac8a311c2074fa320b2dcebd62ef95d2f0d819191c4112b2ac3ff748c4ef3c"} Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.591511 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-t4xd8" event={"ID":"ed24741b-5476-4f20-bd17-4c8686d40419","Type":"ContainerStarted","Data":"4c4a31b8b66cea562439ba1bcf44978e8a9aac0cbb2a042175bfab8e9b0f43d6"} Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.595958 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" event={"ID":"3a68cbce-a0d0-4128-b5fc-ba2664947314","Type":"ContainerStarted","Data":"ff03ac74ec4ae11f5d31030a7c4efe3b61a91112a507968944c8d46bacea1635"} Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.605621 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" event={"ID":"6b76376c-f080-4458-a87a-84eab1e4b86d","Type":"ContainerStarted","Data":"0fa95df1eb46491aad1f938a7804239b8ac2fc4cc48e9663d38a0fc5c43aea18"} Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.612182 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" event={"ID":"f05088c1-1548-4c56-8e14-3610540dec5c","Type":"ContainerStarted","Data":"6ff60732d0e836b35206902af188e4c804f7190e19fa14a951c286655fbcd88b"} Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.627584 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" event={"ID":"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077","Type":"ContainerStarted","Data":"939ce110ecb14d71635d7de21702ccd5ad434c7ac6cd72e9a3a20104b21e806e"} Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.628366 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.631475 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-gm4d4" event={"ID":"23fe0ae4-ae9d-4470-871f-fb431c6c6c80","Type":"ContainerStarted","Data":"ee9ac8dabcb3a62ed040f7aff2bc0bf068c4b940d6ebc49fd4605d856be917fb"} Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.632375 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" event={"ID":"019227a7-15fd-4c90-8807-f5aef16b2b10","Type":"ContainerStarted","Data":"2c5f1fb8f11d414ad9962c7641d6360250b05276636c5c4c13b7277975999115"} Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.657136 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:29 crc kubenswrapper[4706]: E1206 05:22:29.657491 4706 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:30.157475321 +0000 UTC m=+172.485299265 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.658199 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.763131 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:29 crc kubenswrapper[4706]: E1206 05:22:29.763540 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:30.263518613 +0000 UTC m=+172.591342557 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.777922 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xnr66" event={"ID":"25f03df2-7ec0-403c-8b72-1933efc742f5","Type":"ContainerStarted","Data":"749b3f6bb6c54a05c76df2f8a0f4acc042b23252fe9cbb47b4dbd07a6e66303a"} Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.783688 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-jhgqt" event={"ID":"eba39d45-3292-48d5-be72-9f948b5ff2fe","Type":"ContainerStarted","Data":"b7f2d6e61f10231d9586e9d3caf57805435e7da15bab22b166101846bfdd20c8"} Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.787670 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5tbsf" event={"ID":"f5a20850-1d32-4041-881c-098e06ecd4f8","Type":"ContainerStarted","Data":"be4965fb7421e1ad4ae8b3216b70ff79d24cf03e71b650b9c9772a88f8cd9024"} Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.848668 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" podStartSLOduration=143.848653289 
podStartE2EDuration="2m23.848653289s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:29.846270855 +0000 UTC m=+172.174094799" watchObservedRunningTime="2025-12-06 05:22:29.848653289 +0000 UTC m=+172.176477223" Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.865490 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:29 crc kubenswrapper[4706]: E1206 05:22:29.865772 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:30.365752311 +0000 UTC m=+172.693576255 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.869506 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bxpjq" podStartSLOduration=144.869493052 podStartE2EDuration="2m24.869493052s" podCreationTimestamp="2025-12-06 05:20:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:29.86681641 +0000 UTC m=+172.194640374" watchObservedRunningTime="2025-12-06 05:22:29.869493052 +0000 UTC m=+172.197316996" Dec 06 05:22:29 crc kubenswrapper[4706]: I1206 05:22:29.972727 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:29 crc kubenswrapper[4706]: E1206 05:22:29.973144 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:30.473131219 +0000 UTC m=+172.800955163 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:30 crc kubenswrapper[4706]: W1206 05:22:30.035283 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52ee1c0b_d021_43e3_a982_268e0af6f331.slice/crio-d9731717cc9b9e6f5d3ebd3065bd2e11126c4ca3886a7e865504a4d5a53d845d WatchSource:0}: Error finding container d9731717cc9b9e6f5d3ebd3065bd2e11126c4ca3886a7e865504a4d5a53d845d: Status 404 returned error can't find the container with id d9731717cc9b9e6f5d3ebd3065bd2e11126c4ca3886a7e865504a4d5a53d845d Dec 06 05:22:30 crc kubenswrapper[4706]: W1206 05:22:30.041938 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeb964fa0_1524_42ab_b399_8e9a7e7e3543.slice/crio-202a376656ac464e32b15ea13685f7bf3820c8ae72256325594b7d8d5ff0f951 WatchSource:0}: Error finding container 202a376656ac464e32b15ea13685f7bf3820c8ae72256325594b7d8d5ff0f951: Status 404 returned error can't find the container with id 202a376656ac464e32b15ea13685f7bf3820c8ae72256325594b7d8d5ff0f951 Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.074238 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:30 crc kubenswrapper[4706]: E1206 05:22:30.075096 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:30.575075409 +0000 UTC m=+172.902899353 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.085838 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-25hf6"] Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.181571 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:30 crc kubenswrapper[4706]: E1206 05:22:30.182006 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:30.681992473 +0000 UTC m=+173.009816417 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.285196 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:30 crc kubenswrapper[4706]: E1206 05:22:30.285507 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:30.785487326 +0000 UTC m=+173.113311270 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.320853 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-vfwnb"] Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.323390 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-jk2pn"] Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.346485 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-n6bpb"] Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.363564 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-6xms4"] Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.388370 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:30 crc kubenswrapper[4706]: E1206 05:22:30.388928 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:30.888904726 +0000 UTC m=+173.216728670 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.392874 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-qwdgg"] Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.399659 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-bx4tb"] Dec 06 05:22:30 crc kubenswrapper[4706]: W1206 05:22:30.445302 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod971d28d8_7a3b_4af0_a3e3_9ee9468dbca5.slice/crio-554ab924fc6017e95d7a1ffb2c01adf8294def3192ef65845e6ccdd3d19ba013 WatchSource:0}: Error finding container 554ab924fc6017e95d7a1ffb2c01adf8294def3192ef65845e6ccdd3d19ba013: Status 404 returned error can't find the container with id 554ab924fc6017e95d7a1ffb2c01adf8294def3192ef65845e6ccdd3d19ba013 Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.455803 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-xptzp"] Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.489629 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:30 crc kubenswrapper[4706]: E1206 05:22:30.490635 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:30.990616471 +0000 UTC m=+173.318440415 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.527802 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-4ltjs"] Dec 06 05:22:30 crc kubenswrapper[4706]: W1206 05:22:30.545986 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod190e4233_a97e_4af7_8e7e_d66ccf827546.slice/crio-7a757ec14082286cb9beab3cd0ba18867c72509028b6e1141d24e6cdb12ebdc6 WatchSource:0}: Error finding container 7a757ec14082286cb9beab3cd0ba18867c72509028b6e1141d24e6cdb12ebdc6: Status 404 returned error can't find the container with id 7a757ec14082286cb9beab3cd0ba18867c72509028b6e1141d24e6cdb12ebdc6 Dec 06 05:22:30 crc kubenswrapper[4706]: W1206 05:22:30.549470 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6fdda9bf_9941_4f82_958f_22657d41aa74.slice/crio-4d65df55ae3cb5e2e8f31d54c6a108ce205dbd50638b046a6d5593a3837518c7 WatchSource:0}: Error finding container 4d65df55ae3cb5e2e8f31d54c6a108ce205dbd50638b046a6d5593a3837518c7: Status 404 returned error can't find the container with id 4d65df55ae3cb5e2e8f31d54c6a108ce205dbd50638b046a6d5593a3837518c7 Dec 06 05:22:30 crc kubenswrapper[4706]: W1206 05:22:30.554950 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod55fbff71_b86d_4b25_9593_b48effb4fb7f.slice/crio-6636a81d4ad17b670e51f20dc62347b7ef8d9799e520b834bf46bde8ce178e64 WatchSource:0}: Error finding container 6636a81d4ad17b670e51f20dc62347b7ef8d9799e520b834bf46bde8ce178e64: Status 404 returned error can't find the container with id 6636a81d4ad17b670e51f20dc62347b7ef8d9799e520b834bf46bde8ce178e64 Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.592281 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:30 crc kubenswrapper[4706]: E1206 05:22:30.592694 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:31.092676615 +0000 UTC m=+173.420500559 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.693347 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:30 crc kubenswrapper[4706]: E1206 05:22:30.693797 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:31.193773773 +0000 UTC m=+173.521597717 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.794416 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:30 crc kubenswrapper[4706]: E1206 05:22:30.794808 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:31.294796488 +0000 UTC m=+173.622620422 (durationBeforeRetry 500ms). 
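
The lines above repeat a single failure loop: every MountDevice attempt for pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 (wanted by image-registry-697d97f7c8-njll8) and every TearDown attempt for the pod that previously held it (8f668bae-612b-4b75-9490-919e737c6a3b) fail immediately, because the CSI driver kubevirt.io.hostpath-provisioner has not yet registered with the kubelet. Each failure re-arms a 500ms retry window, which is what "No retries permitted until ... (durationBeforeRetry 500ms)" records. A minimal sketch of that gating pattern, assuming the fixed 500ms delay seen in these lines (the real operation executor behind nestedpendingoperations.go:348 is more elaborate and can back off exponentially):

    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    // opGate refuses to re-run an operation for a volume until its
    // per-volume deadline ("no retries permitted until ...") has passed.
    type opGate struct {
    	notBefore map[string]time.Time // volume name -> earliest next attempt
    	delay     time.Duration
    }

    func newOpGate(delay time.Duration) *opGate {
    	return &opGate{notBefore: map[string]time.Time{}, delay: delay}
    }

    func (g *opGate) tryRun(volume string, op func() error) error {
    	if now := time.Now(); now.Before(g.notBefore[volume]) {
    		return fmt.Errorf("no retries permitted until %s", g.notBefore[volume])
    	}
    	if err := op(); err != nil {
    		g.notBefore[volume] = time.Now().Add(g.delay) // re-arm the window
    		return err
    	}
    	delete(g.notBefore, volume) // success clears the backoff
    	return nil
    }

    func main() {
    	gate := newOpGate(500 * time.Millisecond)
    	mount := func() error {
    		// Fails while the driver is absent, like MountDevice above.
    		return errors.New("driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers")
    	}
    	for i := 0; i < 3; i++ {
    		fmt.Println(gate.tryRun("pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8", mount))
    		time.Sleep(300 * time.Millisecond)
    	}
    }
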
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.797013 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-25hf6" event={"ID":"cba73644-0f32-4d53-9c68-e98d52909f9a","Type":"ContainerStarted","Data":"cf3b771db6e30f839c5253090b2d45a3828272f5ebc68f8e50b758fe277e8d0e"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.803611 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sj74z" event={"ID":"a68f594e-b151-4902-9792-b5d6051525dd","Type":"ContainerStarted","Data":"098b383364244ff0de0b38ae5b38d07bf039222e5b60071d148539bc91b8a924"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.809414 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k" event={"ID":"6f29acc2-2357-4418-9680-e743ccba8702","Type":"ContainerStarted","Data":"697c5bc012837f4d8d8359f2f357d9012150f3eba98a7cfaf1b17774dc68a0e3"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.810823 4706 generic.go:334] "Generic (PLEG): container finished" podID="58a1210d-91bd-4a47-b70a-c8026a238565" containerID="20992c614eed777a5150f1bee1484f83d486debd2b6509bb16f812caa2d492b4" exitCode=0 Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.811233 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-5n589" event={"ID":"58a1210d-91bd-4a47-b70a-c8026a238565","Type":"ContainerDied","Data":"20992c614eed777a5150f1bee1484f83d486debd2b6509bb16f812caa2d492b4"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.825253 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9" event={"ID":"d0ff41c4-87e4-4b8a-b7c9-f187a83f0dc1","Type":"ContainerStarted","Data":"310ab03c672a5059deb0b40c15700c82671ea05a79d3b805692c19a58a38aab4"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.835649 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-sj74z" podStartSLOduration=144.835625149 podStartE2EDuration="2m24.835625149s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:30.829265408 +0000 UTC m=+173.157089372" watchObservedRunningTime="2025-12-06 05:22:30.835625149 +0000 UTC m=+173.163449093" Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.837582 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-zvsj5" event={"ID":"76a85e06-bb22-4260-8a17-639478f9b3ca","Type":"ContainerStarted","Data":"48e89721dc6f843201f6a6f88de0cd784cbf4c436cfb8fcc5cbc9fb55c41685a"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.838339 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-console-operator/console-operator-58897d9998-zvsj5" Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.841546 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4lmqq" event={"ID":"bd187a9c-688a-463f-a84a-6fb7c1df0360","Type":"ContainerStarted","Data":"0f8b796a13bc26249a8db345ca1ec7d52c477f774c6718d2b2add784286a2d8b"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.843614 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-bx4tb" event={"ID":"55fbff71-b86d-4b25-9593-b48effb4fb7f","Type":"ContainerStarted","Data":"6636a81d4ad17b670e51f20dc62347b7ef8d9799e520b834bf46bde8ce178e64"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.845252 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-bxpjq" event={"ID":"152ec06e-1c86-4db5-87a5-a96da88e008e","Type":"ContainerStarted","Data":"8b006764a40690ede69fbf257f7da6df66651013955ece99282575eeb1d8fd9d"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.856393 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-kcwkb" event={"ID":"11e93d60-f22e-4b73-b41c-72a9b55e4ff5","Type":"ContainerStarted","Data":"802d2767132d9c9f6d0f83c46c08d5e2192c8044f305726a83a15efcf9984411"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.857613 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jgqp9" event={"ID":"e50331c5-c197-47f6-a27d-abd4fd31410f","Type":"ContainerStarted","Data":"e18c2732c8bd5f25e74caf7b346f5f67d6f9dcde4ce7a130935155e04e5f8169"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.880002 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59" event={"ID":"4f34c9f6-5366-412e-a8b7-93837b5ea428","Type":"ContainerStarted","Data":"524130a08fef7a83cfca476e46a30b10eb0217c4c53409008966c55037d4959a"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.881069 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qld9m" event={"ID":"eb964fa0-1524-42ab-b399-8e9a7e7e3543","Type":"ContainerStarted","Data":"202a376656ac464e32b15ea13685f7bf3820c8ae72256325594b7d8d5ff0f951"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.882983 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-6xms4" event={"ID":"190e4233-a97e-4af7-8e7e-d66ccf827546","Type":"ContainerStarted","Data":"7a757ec14082286cb9beab3cd0ba18867c72509028b6e1141d24e6cdb12ebdc6"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.883852 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vfwnb" event={"ID":"a2b02aaa-3dd3-462e-9dd6-c69748bc8511","Type":"ContainerStarted","Data":"8838d3d818bdc3f95545d32073a9260308ffeed727da992f6fbc56fa6e250a5a"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.891558 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s" event={"ID":"39728d8c-03c4-42d3-999d-1dfe014cfb34","Type":"ContainerStarted","Data":"a30834a18e94286871139c36b5c098b472b3ee08655e6684f3943f04c12aa0ee"} Dec 06 05:22:30 crc kubenswrapper[4706]: 
I1206 05:22:30.892694 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jk2pn" event={"ID":"971d28d8-7a3b-4af0-a3e3-9ee9468dbca5","Type":"ContainerStarted","Data":"554ab924fc6017e95d7a1ffb2c01adf8294def3192ef65845e6ccdd3d19ba013"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.893394 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-9gzzw" event={"ID":"43dddc60-c6c0-48bb-9888-6cfb66efd812","Type":"ContainerStarted","Data":"270a430a78a2303b15ba10ba0198c4593360179fd761f888dd9a80f30a30e543"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.894209 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-pzbsr" event={"ID":"52ee1c0b-d021-43e3-a982-268e0af6f331","Type":"ContainerStarted","Data":"d9731717cc9b9e6f5d3ebd3065bd2e11126c4ca3886a7e865504a4d5a53d845d"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.895147 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:30 crc kubenswrapper[4706]: E1206 05:22:30.895871 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:31.395846755 +0000 UTC m=+173.723670689 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.896510 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xnr66" event={"ID":"25f03df2-7ec0-403c-8b72-1933efc742f5","Type":"ContainerStarted","Data":"a05dd672a2cc9d7f222a90f48c2f841dc0919c7cbf8813623ddb32a5a069bd8a"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.898861 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" event={"ID":"e9405376-0114-4bee-b245-f17b30f2594a","Type":"ContainerStarted","Data":"ca2b3785e8020a03fbd09b8cf88c5110a7e7b23110ab5ef283eaa7594ff939a2"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.899567 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wdsds" event={"ID":"a417f08a-e64f-4a02-abb3-bee2049eb2e7","Type":"ContainerStarted","Data":"d3d5479bf77e183a2ef3074b9743eecf7e3a540265fc19ffdf09732ebbdd2fc4"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.900210 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-qwdgg" 
event={"ID":"28ae28d5-433c-4ce7-bb6e-2532d65b354d","Type":"ContainerStarted","Data":"686b7d90a9ffd498ea8f7e7fd89276e9cc55c7f0a0bdf06568b7db5c1289e91d"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.900805 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq" event={"ID":"4edab72e-ed84-4e90-86da-02b3d3aa33bf","Type":"ContainerStarted","Data":"5c60d44c024cb3fa304933496845dfe8c2e9c8f9334eda41371c7996b42c785e"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.901427 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" event={"ID":"6fdda9bf-9941-4f82-958f-22657d41aa74","Type":"ContainerStarted","Data":"4d65df55ae3cb5e2e8f31d54c6a108ce205dbd50638b046a6d5593a3837518c7"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.903301 4706 generic.go:334] "Generic (PLEG): container finished" podID="445fbc3d-3a2f-4361-8444-badce4d8e564" containerID="25810a7add87ca617849f740353b79060f445c8bf27a36491e5a3835778706af" exitCode=0 Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.904085 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-k8g95" event={"ID":"445fbc3d-3a2f-4361-8444-badce4d8e564","Type":"ContainerDied","Data":"25810a7add87ca617849f740353b79060f445c8bf27a36491e5a3835778706af"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.912194 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-l9d42" event={"ID":"de9628dc-df47-4a48-898b-f85d33e59452","Type":"ContainerStarted","Data":"01cdba7813554e5c4a85358932d99af2eee818896fae890bb84e7391a68c3b7f"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.913941 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" event={"ID":"f4065785-c72e-4c45-ab51-ce292be4f2ed","Type":"ContainerStarted","Data":"fa4fe2f65761924e80c05be872bf0d62da7f3ea4360c143c3e56cf7bb8af5fe1"} Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.924593 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-64pm9" podStartSLOduration=144.924562759 podStartE2EDuration="2m24.924562759s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:30.917319664 +0000 UTC m=+173.245143638" watchObservedRunningTime="2025-12-06 05:22:30.924562759 +0000 UTC m=+173.252386733" Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.937273 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-zvsj5" podStartSLOduration=144.937252702 podStartE2EDuration="2m24.937252702s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:30.937144059 +0000 UTC m=+173.264968013" watchObservedRunningTime="2025-12-06 05:22:30.937252702 +0000 UTC m=+173.265076646" Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.964556 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xnr66" podStartSLOduration=144.964536499 
podStartE2EDuration="2m24.964536499s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:30.961131356 +0000 UTC m=+173.288955300" watchObservedRunningTime="2025-12-06 05:22:30.964536499 +0000 UTC m=+173.292360443" Dec 06 05:22:30 crc kubenswrapper[4706]: I1206 05:22:30.996964 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:30 crc kubenswrapper[4706]: E1206 05:22:30.999304 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:31.499280666 +0000 UTC m=+173.827104600 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.011081 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-l9d42" podStartSLOduration=145.011064333 podStartE2EDuration="2m25.011064333s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:31.010424336 +0000 UTC m=+173.338248310" watchObservedRunningTime="2025-12-06 05:22:31.011064333 +0000 UTC m=+173.338888287" Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.098350 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:31 crc kubenswrapper[4706]: E1206 05:22:31.098565 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:31.598527213 +0000 UTC m=+173.926351157 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.099871 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:31 crc kubenswrapper[4706]: E1206 05:22:31.100470 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:31.600461216 +0000 UTC m=+173.928285160 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.200210 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:31 crc kubenswrapper[4706]: E1206 05:22:31.200540 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:31.700495765 +0000 UTC m=+174.028319749 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.301504 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:31 crc kubenswrapper[4706]: E1206 05:22:31.302010 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:31.801963033 +0000 UTC m=+174.129786977 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.403423 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:31 crc kubenswrapper[4706]: E1206 05:22:31.403923 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:31.903900193 +0000 UTC m=+174.231724137 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.504780 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:31 crc kubenswrapper[4706]: E1206 05:22:31.505316 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:32.005298939 +0000 UTC m=+174.333122883 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.605938 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:31 crc kubenswrapper[4706]: E1206 05:22:31.606331 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:32.106311895 +0000 UTC m=+174.434135829 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.640572 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.642935 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.642976 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.708061 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:31 crc kubenswrapper[4706]: E1206 05:22:31.708570 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:32.208556223 +0000 UTC m=+174.536380167 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.808940 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:31 crc kubenswrapper[4706]: E1206 05:22:31.809105 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:32.309079366 +0000 UTC m=+174.636903310 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.809296 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:31 crc kubenswrapper[4706]: E1206 05:22:31.809666 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:32.309658841 +0000 UTC m=+174.637482785 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.843394 4706 patch_prober.go:28] interesting pod/console-operator-58897d9998-zvsj5 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.843833 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-zvsj5" podUID="76a85e06-bb22-4260-8a17-639478f9b3ca" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.910153 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:31 crc kubenswrapper[4706]: E1206 05:22:31.910506 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:32.410485472 +0000 UTC m=+174.738309416 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.910597 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:31 crc kubenswrapper[4706]: E1206 05:22:31.910919 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:32.410912103 +0000 UTC m=+174.738736047 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.922469 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" event={"ID":"6b76376c-f080-4458-a87a-84eab1e4b86d","Type":"ContainerStarted","Data":"9dd5abb96dc7d7756c8b52bdc845b1deeb2c31a44e5990ce12bcd854601d538d"} Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.923426 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9sxtr" event={"ID":"57c94140-5c17-4423-82aa-e62f070fa68c","Type":"ContainerStarted","Data":"c20e5e261dca5ce361b69df8862f91319c829c5ab9a3a3bb1438b3b1d573167a"} Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.924813 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq" event={"ID":"4edab72e-ed84-4e90-86da-02b3d3aa33bf","Type":"ContainerStarted","Data":"a411ac5921ff8e09e6d55bd79e9502d2d792e46ea05ca75ef2fb7df9bd42a5d1"} Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.928286 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-jhgqt" event={"ID":"eba39d45-3292-48d5-be72-9f948b5ff2fe","Type":"ContainerStarted","Data":"1c75bb2dd91ae70ce3254398828e4837a3187ead9de0ed3f0bd39f616a20e558"} Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.930024 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-5g2s4" event={"ID":"4230f0fb-f05e-4ae6-9755-db33865a6c33","Type":"ContainerStarted","Data":"fb463f1664b7599c98e3c436d23e655064f34a266d59f1bc89ca002998c0d4a4"} Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.931233 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wdsds" event={"ID":"a417f08a-e64f-4a02-abb3-bee2049eb2e7","Type":"ContainerStarted","Data":"e39db3224a84e1fa0d4c763b2922fc49ed05a0221167af90e907992bf1e494ae"} Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.932329 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-gm4d4" event={"ID":"23fe0ae4-ae9d-4470-871f-fb431c6c6c80","Type":"ContainerStarted","Data":"3f3f900b01c6770f2784e0c99f893d7001deddbb94ac88c7cf6168a9de170853"} Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.933384 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hggv7" event={"ID":"67f8479e-b919-4de1-8357-2fd41bf205a6","Type":"ContainerStarted","Data":"75ec16bbe9478f6078593303a5690341cb19f19a3ab4bede7277a2f4e2bbf38d"} Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.934581 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" event={"ID":"019227a7-15fd-4c90-8807-f5aef16b2b10","Type":"ContainerStarted","Data":"deb1ab250383a8ba6e7b9d70bee61143b1074a5a303f4b0535d0624b21440de4"} Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.935717 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qld9m" event={"ID":"eb964fa0-1524-42ab-b399-8e9a7e7e3543","Type":"ContainerStarted","Data":"c8a3cec7f77fb947707c9c5163c4419dbb871616c88fd4ced332faedfdaa9dab"} Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.936960 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-t4xd8" event={"ID":"ed24741b-5476-4f20-bd17-4c8686d40419","Type":"ContainerStarted","Data":"5ff09d37f29d70a08c9506117f34107ef1fe0dffa353358e0ca12f6f7dd35fcb"} Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.938781 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-db62k" event={"ID":"33133042-30b9-487e-8ee4-097e0faf7673","Type":"ContainerStarted","Data":"197ded23bcce1b540c9bbb644706e482a127cec2634ed80f3de845ef85a4c560"} Dec 06 05:22:31 crc kubenswrapper[4706]: I1206 05:22:31.946293 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-zvsj5" Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.011660 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:32 crc kubenswrapper[4706]: E1206 05:22:32.012125 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:32.512102933 +0000 UTC m=+174.839926877 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.113352 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:32 crc kubenswrapper[4706]: E1206 05:22:32.114711 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:32.614692901 +0000 UTC m=+174.942516955 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.214380 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:32 crc kubenswrapper[4706]: E1206 05:22:32.214589 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:32.714557666 +0000 UTC m=+175.042381610 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.214769 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:32 crc kubenswrapper[4706]: E1206 05:22:32.215136 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:32.715129272 +0000 UTC m=+175.042953216 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.315601 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:32 crc kubenswrapper[4706]: E1206 05:22:32.316237 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:32.816209389 +0000 UTC m=+175.144033363 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.418246 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:32 crc kubenswrapper[4706]: E1206 05:22:32.419364 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:32.919335242 +0000 UTC m=+175.247159226 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.519626 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:32 crc kubenswrapper[4706]: E1206 05:22:32.520343 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:33.020292686 +0000 UTC m=+175.348116660 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.520578 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:32 crc kubenswrapper[4706]: E1206 05:22:32.521079 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:33.021027165 +0000 UTC m=+175.348851119 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.622120 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:32 crc kubenswrapper[4706]: E1206 05:22:32.622297 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:33.122262416 +0000 UTC m=+175.450086390 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.622499 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:32 crc kubenswrapper[4706]: E1206 05:22:32.623076 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:33.123032978 +0000 UTC m=+175.450856962 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.652574 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 06 05:22:32 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld Dec 06 05:22:32 crc kubenswrapper[4706]: [+]process-running ok Dec 06 05:22:32 crc kubenswrapper[4706]: healthz check failed Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.652661 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.723368 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:32 crc kubenswrapper[4706]: E1206 05:22:32.723668 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:33.223646472 +0000 UTC m=+175.551470416 (durationBeforeRetry 500ms). 
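
Interleaved with the volume retries are two kinds of probe failure: the router's startup probe first cannot connect at all ("dial tcp [::1]:1936: connect: connection refused", nothing listening yet), the console-operator readiness probe times out, and a second later the router does answer but returns HTTP 500 with a per-check body ("[-]backend-http failed", "[-]has-synced failed", "[+]process-running ok"). A stand-in that distinguishes the same two failure modes, with the URL and a short timeout taken from the log rather than from kubelet's prober:

    package main

    import (
    	"fmt"
    	"io"
    	"net/http"
    	"time"
    )

    // probe mimics an HTTP GET health check: a transport error (e.g.
    // connection refused) and a non-200 status are distinct failures.
    func probe(url string) string {
    	client := &http.Client{Timeout: time.Second}
    	resp, err := client.Get(url)
    	if err != nil {
    		return fmt.Sprintf("failure: %v", err)
    	}
    	defer resp.Body.Close()
    	body, _ := io.ReadAll(io.LimitReader(resp.Body, 4096))
    	if resp.StatusCode != http.StatusOK {
    		return fmt.Sprintf("failure: HTTP probe failed with statuscode: %d\n%s", resp.StatusCode, body)
    	}
    	return "success"
    }

    func main() {
    	fmt.Println(probe("http://localhost:1936/healthz/ready"))
    }
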
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.825309 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:32 crc kubenswrapper[4706]: E1206 05:22:32.825791 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:33.325767128 +0000 UTC m=+175.653591072 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.926087 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:32 crc kubenswrapper[4706]: E1206 05:22:32.926359 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:33.426315931 +0000 UTC m=+175.754139875 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.926457 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:32 crc kubenswrapper[4706]: E1206 05:22:32.926890 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:33.426882136 +0000 UTC m=+175.754706080 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.946209 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-pzbsr" event={"ID":"52ee1c0b-d021-43e3-a982-268e0af6f331","Type":"ContainerStarted","Data":"49ca0637b9f31b3a15c87d7b27be42ffde0de935537f4810bb13e95ed19a8107"} Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.947620 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s" event={"ID":"39728d8c-03c4-42d3-999d-1dfe014cfb34","Type":"ContainerStarted","Data":"72a35fd1caa4dd68f2228a6c426ff2fc121cea0a9cf2c8382b55c998a241e913"} Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.949536 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5tbsf" event={"ID":"f5a20850-1d32-4041-881c-098e06ecd4f8","Type":"ContainerStarted","Data":"4f370349a950e755f8db6b935c890227c9d4eb5e1272032885e52d11b80cee58"} Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.951673 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k" event={"ID":"6f29acc2-2357-4418-9680-e743ccba8702","Type":"ContainerStarted","Data":"489c479303a7db1571a9ffc5445ba4a931e70d8a84648f7fd7b093d83887ef0f"} Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.953312 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4lmqq" event={"ID":"bd187a9c-688a-463f-a84a-6fb7c1df0360","Type":"ContainerStarted","Data":"e2d18aca5af341639a14b8b9734258ed27c79d872e22fb0d2fa47cab089de6f4"} Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.955951 4706 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59" event={"ID":"4f34c9f6-5366-412e-a8b7-93837b5ea428","Type":"ContainerStarted","Data":"17d54770631c1d89f5721c51f1c5a1f2cd2a35dbc32904f436a1121ae6dabfd7"} Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.958292 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" event={"ID":"3a68cbce-a0d0-4128-b5fc-ba2664947314","Type":"ContainerStarted","Data":"194b8339807c23b1f1822cb56b85f1f8c4098d69a2891a5351589b63f7bd716d"} Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.960423 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-9gzzw" event={"ID":"43dddc60-c6c0-48bb-9888-6cfb66efd812","Type":"ContainerStarted","Data":"75b64c83c98167a6820a3661b0bb0f9bc3134650977be4b753653ae58238ccb8"} Dec 06 05:22:32 crc kubenswrapper[4706]: I1206 05:22:32.962035 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" event={"ID":"6fdda9bf-9941-4f82-958f-22657d41aa74","Type":"ContainerStarted","Data":"908154a80e2891b6c9fdc996b5a0cfe97aa2f7a1622ba70fff73ecb822bdbf2d"} Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.027739 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.028132 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:33.528106307 +0000 UTC m=+175.855930261 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.028189 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.028688 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:33.528656882 +0000 UTC m=+175.856481026 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.130828 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.131464 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:33.631399804 +0000 UTC m=+175.959223758 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.131732 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.132089 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:33.632075722 +0000 UTC m=+175.959899666 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.233510 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.233846 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:33.733806238 +0000 UTC m=+176.061630192 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.233997 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.234540 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:33.734520537 +0000 UTC m=+176.062344481 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.335214 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.335432 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:33.835397339 +0000 UTC m=+176.163221283 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.336079 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.336433 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:33.836425396 +0000 UTC m=+176.164249340 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.437417 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.437618 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:33.937599286 +0000 UTC m=+176.265423230 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.437786 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.438264 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:33.938254274 +0000 UTC m=+176.266078218 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.539368 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.540337 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:34.040299727 +0000 UTC m=+176.368123711 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.540735 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.541430 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:34.041414167 +0000 UTC m=+176.369238151 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.641961 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.642417 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:34.142273458 +0000 UTC m=+176.470097432 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.642533 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.643090 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:34.14304054 +0000 UTC m=+176.470864514 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.649097 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 06 05:22:33 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld Dec 06 05:22:33 crc kubenswrapper[4706]: [+]process-running ok Dec 06 05:22:33 crc kubenswrapper[4706]: healthz check failed Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.649191 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.744005 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.744272 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:34.24422433 +0000 UTC m=+176.572048304 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.744405 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.744882 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:34.244866627 +0000 UTC m=+176.572690601 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.846206 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.847075 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:34.347021643 +0000 UTC m=+176.674845617 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.847534 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.848112 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:34.348095302 +0000 UTC m=+176.675919276 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.949583 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.949812 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:34.449778176 +0000 UTC m=+176.777602150 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.950003 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:33 crc kubenswrapper[4706]: E1206 05:22:33.950490 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:34.450473134 +0000 UTC m=+176.778297118 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.969322 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-25hf6" event={"ID":"cba73644-0f32-4d53-9c68-e98d52909f9a","Type":"ContainerStarted","Data":"999a64d5905eda828f927b621ef2996bfd64fb34425313c39ac218f97088bc8d"} Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.971367 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-6xms4" event={"ID":"190e4233-a97e-4af7-8e7e-d66ccf827546","Type":"ContainerStarted","Data":"73bcc794f3f64c81746ea4a22fdb90f1fc186b07fb75552af94df94b5543e3c1"} Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.973352 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" event={"ID":"a4df44f2-c01b-47ab-a7df-6b30ea0510a3","Type":"ContainerStarted","Data":"a106d244939248bd07f6cb3fa6ddf317bb6b1d38b0e4a6d804c22afae7539ea6"} Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.975107 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jgqp9" event={"ID":"e50331c5-c197-47f6-a27d-abd4fd31410f","Type":"ContainerStarted","Data":"f95ffd1f7d902f2995b1ef98482906901ce151d1dfb3b7169f2bd2e421c63e1c"} Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.978386 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-kcwkb" event={"ID":"11e93d60-f22e-4b73-b41c-72a9b55e4ff5","Type":"ContainerStarted","Data":"d88f96095ded35a209d8edf918dad7d9f113a2e4f84faa2be005ecf66e603b75"} Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.983170 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" event={"ID":"f05088c1-1548-4c56-8e14-3610540dec5c","Type":"ContainerStarted","Data":"6f6354cc72b17690ba79356c557d85d14f707789317a419784f544d24a0fdaab"} Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.983939 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.986700 4706 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-jslph container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Dec 06 05:22:33 crc kubenswrapper[4706]: I1206 05:22:33.986781 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" podUID="6b76376c-f080-4458-a87a-84eab1e4b86d" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 
05:22:34.013447 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" podStartSLOduration=148.013420953 podStartE2EDuration="2m28.013420953s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:34.01070514 +0000 UTC m=+176.338529174" watchObservedRunningTime="2025-12-06 05:22:34.013420953 +0000 UTC m=+176.341244927" Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 05:22:34.051161 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:34 crc kubenswrapper[4706]: E1206 05:22:34.052671 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:34.552642451 +0000 UTC m=+176.880466435 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 05:22:34.154799 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:34 crc kubenswrapper[4706]: E1206 05:22:34.155157 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:34.655141216 +0000 UTC m=+176.982965160 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 05:22:34.256196 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:34 crc kubenswrapper[4706]: E1206 05:22:34.256552 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:34.756530773 +0000 UTC m=+177.084354717 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 05:22:34.358178 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:34 crc kubenswrapper[4706]: E1206 05:22:34.358603 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:34.858586386 +0000 UTC m=+177.186410330 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 05:22:34.459417 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:34 crc kubenswrapper[4706]: E1206 05:22:34.459725 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:34.959705444 +0000 UTC m=+177.287529388 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 05:22:34.561553 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:34 crc kubenswrapper[4706]: E1206 05:22:34.562145 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:35.062117808 +0000 UTC m=+177.389941752 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 05:22:34.645006 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 06 05:22:34 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld Dec 06 05:22:34 crc kubenswrapper[4706]: [+]process-running ok Dec 06 05:22:34 crc kubenswrapper[4706]: healthz check failed Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 05:22:34.645643 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 05:22:34.663002 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:34 crc kubenswrapper[4706]: E1206 05:22:34.663240 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:35.163201805 +0000 UTC m=+177.491025749 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 05:22:34.663296 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:34 crc kubenswrapper[4706]: E1206 05:22:34.663662 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:35.163653617 +0000 UTC m=+177.491477561 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 05:22:34.764911 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:34 crc kubenswrapper[4706]: E1206 05:22:34.765188 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:35.265149876 +0000 UTC m=+177.592973820 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 05:22:34.765333 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:34 crc kubenswrapper[4706]: E1206 05:22:34.765818 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:35.265810143 +0000 UTC m=+177.593634087 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 05:22:34.867105 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:34 crc kubenswrapper[4706]: E1206 05:22:34.867329 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:35.367285872 +0000 UTC m=+177.695109826 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 05:22:34.867878 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:34 crc kubenswrapper[4706]: E1206 05:22:34.868380 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:35.368369881 +0000 UTC m=+177.696193825 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 05:22:34.971285 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:34 crc kubenswrapper[4706]: E1206 05:22:34.971465 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:35.471441842 +0000 UTC m=+177.799265786 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 05:22:34.971826 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:34 crc kubenswrapper[4706]: E1206 05:22:34.972191 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:35.472184062 +0000 UTC m=+177.800008006 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 05:22:34.992766 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vfwnb" event={"ID":"a2b02aaa-3dd3-462e-9dd6-c69748bc8511","Type":"ContainerStarted","Data":"d8fa143159c2e69f92456abd1ffe222496fc135ea403cd078f8499363a1cfca8"} Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 05:22:34.996197 4706 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-jslph container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Dec 06 05:22:34 crc kubenswrapper[4706]: I1206 05:22:34.996259 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" podUID="6b76376c-f080-4458-a87a-84eab1e4b86d" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.013490 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-xpmpp" podStartSLOduration=149.013462406 podStartE2EDuration="2m29.013462406s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:35.013136757 +0000 UTC m=+177.340960701" watchObservedRunningTime="2025-12-06 05:22:35.013462406 +0000 UTC m=+177.341286370" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.073550 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.073771 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:35.573731652 +0000 UTC m=+177.901555616 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.074079 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.074599 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:35.574577005 +0000 UTC m=+177.902400949 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.175437 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:35.675408876 +0000 UTC m=+178.003232860 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.175290 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.175813 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.176309 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:35.67629475 +0000 UTC m=+178.004118734 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.230021 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.231230 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.234838 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.237201 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.259014 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.277394 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.277747 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:35.777694735 +0000 UTC m=+178.105518719 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.277830 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/602cc6c6-054f-46bd-a044-0ed68baa41ad-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"602cc6c6-054f-46bd-a044-0ed68baa41ad\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.277895 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.277928 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/602cc6c6-054f-46bd-a044-0ed68baa41ad-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"602cc6c6-054f-46bd-a044-0ed68baa41ad\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.278494 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:35.778472986 +0000 UTC m=+178.106297090 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.379527 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.379708 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:35.879677237 +0000 UTC m=+178.207501181 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.379927 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/602cc6c6-054f-46bd-a044-0ed68baa41ad-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"602cc6c6-054f-46bd-a044-0ed68baa41ad\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.380128 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/602cc6c6-054f-46bd-a044-0ed68baa41ad-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"602cc6c6-054f-46bd-a044-0ed68baa41ad\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.380242 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/602cc6c6-054f-46bd-a044-0ed68baa41ad-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"602cc6c6-054f-46bd-a044-0ed68baa41ad\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.380768 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:35.880723095 +0000 UTC m=+178.208547059 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.380976 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.401227 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/602cc6c6-054f-46bd-a044-0ed68baa41ad-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"602cc6c6-054f-46bd-a044-0ed68baa41ad\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.482558 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.482833 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:35.982767048 +0000 UTC m=+178.310590992 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.482947 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.483321 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:35.983313314 +0000 UTC m=+178.311137258 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.557350 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.584374 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.584560 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:36.084522595 +0000 UTC m=+178.412346539 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.584890 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.585231 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:36.085222423 +0000 UTC m=+178.413046367 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.644371 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 06 05:22:35 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld Dec 06 05:22:35 crc kubenswrapper[4706]: [+]process-running ok Dec 06 05:22:35 crc kubenswrapper[4706]: healthz check failed Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.644574 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.678618 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-98kq2"] Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.679721 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-98kq2" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.681748 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.685756 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.685906 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:36.185886989 +0000 UTC m=+178.513710933 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.686007 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.686345 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:36.186337621 +0000 UTC m=+178.514161565 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.692347 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-98kq2"] Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.787091 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.787257 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:36.287235634 +0000 UTC m=+178.615059578 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.787374 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2985a55d-3af2-4dd6-adde-7714459e08c3-utilities\") pod \"community-operators-98kq2\" (UID: \"2985a55d-3af2-4dd6-adde-7714459e08c3\") " pod="openshift-marketplace/community-operators-98kq2" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.787423 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.787447 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2985a55d-3af2-4dd6-adde-7714459e08c3-catalog-content\") pod \"community-operators-98kq2\" (UID: \"2985a55d-3af2-4dd6-adde-7714459e08c3\") " pod="openshift-marketplace/community-operators-98kq2" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.787497 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srtrx\" (UniqueName: \"kubernetes.io/projected/2985a55d-3af2-4dd6-adde-7714459e08c3-kube-api-access-srtrx\") pod \"community-operators-98kq2\" (UID: \"2985a55d-3af2-4dd6-adde-7714459e08c3\") " pod="openshift-marketplace/community-operators-98kq2" Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.788100 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:36.288090907 +0000 UTC m=+178.615914841 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.853005 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.888534 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.888786 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:36.388723372 +0000 UTC m=+178.716547316 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.888886 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.888988 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2985a55d-3af2-4dd6-adde-7714459e08c3-catalog-content\") pod \"community-operators-98kq2\" (UID: \"2985a55d-3af2-4dd6-adde-7714459e08c3\") " pod="openshift-marketplace/community-operators-98kq2" Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.889275 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:36.389257536 +0000 UTC m=+178.717081720 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.889312 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srtrx\" (UniqueName: \"kubernetes.io/projected/2985a55d-3af2-4dd6-adde-7714459e08c3-kube-api-access-srtrx\") pod \"community-operators-98kq2\" (UID: \"2985a55d-3af2-4dd6-adde-7714459e08c3\") " pod="openshift-marketplace/community-operators-98kq2" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.889540 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2985a55d-3af2-4dd6-adde-7714459e08c3-catalog-content\") pod \"community-operators-98kq2\" (UID: \"2985a55d-3af2-4dd6-adde-7714459e08c3\") " pod="openshift-marketplace/community-operators-98kq2" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.889774 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2985a55d-3af2-4dd6-adde-7714459e08c3-utilities\") pod \"community-operators-98kq2\" (UID: \"2985a55d-3af2-4dd6-adde-7714459e08c3\") " pod="openshift-marketplace/community-operators-98kq2" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.890259 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2985a55d-3af2-4dd6-adde-7714459e08c3-utilities\") pod \"community-operators-98kq2\" (UID: \"2985a55d-3af2-4dd6-adde-7714459e08c3\") " pod="openshift-marketplace/community-operators-98kq2" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.910403 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srtrx\" (UniqueName: \"kubernetes.io/projected/2985a55d-3af2-4dd6-adde-7714459e08c3-kube-api-access-srtrx\") pod \"community-operators-98kq2\" (UID: \"2985a55d-3af2-4dd6-adde-7714459e08c3\") " pod="openshift-marketplace/community-operators-98kq2" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.961939 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.962008 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.990876 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: 
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.991064 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:36.491025073 +0000 UTC m=+178.818849017 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.991206 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:35 crc kubenswrapper[4706]: E1206 05:22:35.991577 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:36.491568117 +0000 UTC m=+178.819392061 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.994790 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"602cc6c6-054f-46bd-a044-0ed68baa41ad","Type":"ContainerStarted","Data":"f25182cec79d28bf54a1865342f9dfed95b0862c121fe9256a2b7c1289b2ebd3"} Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.996994 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-98kq2" Dec 06 05:22:35 crc kubenswrapper[4706]: I1206 05:22:35.998146 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" event={"ID":"e9405376-0114-4bee-b245-f17b30f2594a","Type":"ContainerStarted","Data":"128a6e82768f0cec748a1a7c63155a2de1ce3d17db05d8e9bc6454ebf5d1e6c3"} Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.000214 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-bx4tb" event={"ID":"55fbff71-b86d-4b25-9593-b48effb4fb7f","Type":"ContainerStarted","Data":"5454b2d5d1bebf5e30b52c55e351702349ec2507d8d3076597a48569ccc259b6"} Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.014690 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-gm4d4" podStartSLOduration=11.01467586 podStartE2EDuration="11.01467586s" podCreationTimestamp="2025-12-06 05:22:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:36.012978625 +0000 UTC m=+178.340802569" watchObservedRunningTime="2025-12-06 05:22:36.01467586 +0000 UTC m=+178.342499804" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.095184 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:36 crc kubenswrapper[4706]: E1206 05:22:36.108607 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:36.608562714 +0000 UTC m=+178.936386698 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.122282 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9hq8t"] Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.124751 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9hq8t" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.137556 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-j5lbl"] Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.147300 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j5lbl" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.156220 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9hq8t"] Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.160619 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.169243 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j5lbl"] Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.199105 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wttgm\" (UniqueName: \"kubernetes.io/projected/5e6f7aa9-bbf3-4160-9eb8-e7d54c354202-kube-api-access-wttgm\") pod \"certified-operators-j5lbl\" (UID: \"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202\") " pod="openshift-marketplace/certified-operators-j5lbl" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.199175 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e6f7aa9-bbf3-4160-9eb8-e7d54c354202-utilities\") pod \"certified-operators-j5lbl\" (UID: \"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202\") " pod="openshift-marketplace/certified-operators-j5lbl" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.199210 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fac9e7cf-4919-4a48-b314-f9b985397e7e-catalog-content\") pod \"community-operators-9hq8t\" (UID: \"fac9e7cf-4919-4a48-b314-f9b985397e7e\") " pod="openshift-marketplace/community-operators-9hq8t" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.199230 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fac9e7cf-4919-4a48-b314-f9b985397e7e-utilities\") pod \"community-operators-9hq8t\" (UID: \"fac9e7cf-4919-4a48-b314-f9b985397e7e\") " pod="openshift-marketplace/community-operators-9hq8t" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.199253 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e6f7aa9-bbf3-4160-9eb8-e7d54c354202-catalog-content\") pod \"certified-operators-j5lbl\" (UID: \"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202\") " pod="openshift-marketplace/certified-operators-j5lbl" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.199288 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.199336 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkbvx\" (UniqueName: \"kubernetes.io/projected/fac9e7cf-4919-4a48-b314-f9b985397e7e-kube-api-access-wkbvx\") pod \"community-operators-9hq8t\" (UID: \"fac9e7cf-4919-4a48-b314-f9b985397e7e\") " 
pod="openshift-marketplace/community-operators-9hq8t" Dec 06 05:22:36 crc kubenswrapper[4706]: E1206 05:22:36.199825 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:36.699809415 +0000 UTC m=+179.027633359 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.270476 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-svf4w"] Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.271641 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-svf4w" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.284804 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-svf4w"] Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.304409 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.304592 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fac9e7cf-4919-4a48-b314-f9b985397e7e-catalog-content\") pod \"community-operators-9hq8t\" (UID: \"fac9e7cf-4919-4a48-b314-f9b985397e7e\") " pod="openshift-marketplace/community-operators-9hq8t" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.304614 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fac9e7cf-4919-4a48-b314-f9b985397e7e-utilities\") pod \"community-operators-9hq8t\" (UID: \"fac9e7cf-4919-4a48-b314-f9b985397e7e\") " pod="openshift-marketplace/community-operators-9hq8t" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.304638 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e6f7aa9-bbf3-4160-9eb8-e7d54c354202-catalog-content\") pod \"certified-operators-j5lbl\" (UID: \"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202\") " pod="openshift-marketplace/certified-operators-j5lbl" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.304700 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wkbvx\" (UniqueName: \"kubernetes.io/projected/fac9e7cf-4919-4a48-b314-f9b985397e7e-kube-api-access-wkbvx\") pod \"community-operators-9hq8t\" (UID: \"fac9e7cf-4919-4a48-b314-f9b985397e7e\") " pod="openshift-marketplace/community-operators-9hq8t" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.304752 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-wttgm\" (UniqueName: \"kubernetes.io/projected/5e6f7aa9-bbf3-4160-9eb8-e7d54c354202-kube-api-access-wttgm\") pod \"certified-operators-j5lbl\" (UID: \"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202\") " pod="openshift-marketplace/certified-operators-j5lbl" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.304770 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e6f7aa9-bbf3-4160-9eb8-e7d54c354202-utilities\") pod \"certified-operators-j5lbl\" (UID: \"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202\") " pod="openshift-marketplace/certified-operators-j5lbl" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.304792 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/514a779d-1633-49f5-a991-5a80d8714c19-catalog-content\") pod \"certified-operators-svf4w\" (UID: \"514a779d-1633-49f5-a991-5a80d8714c19\") " pod="openshift-marketplace/certified-operators-svf4w" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.304816 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/514a779d-1633-49f5-a991-5a80d8714c19-utilities\") pod \"certified-operators-svf4w\" (UID: \"514a779d-1633-49f5-a991-5a80d8714c19\") " pod="openshift-marketplace/certified-operators-svf4w" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.304837 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnjm6\" (UniqueName: \"kubernetes.io/projected/514a779d-1633-49f5-a991-5a80d8714c19-kube-api-access-rnjm6\") pod \"certified-operators-svf4w\" (UID: \"514a779d-1633-49f5-a991-5a80d8714c19\") " pod="openshift-marketplace/certified-operators-svf4w" Dec 06 05:22:36 crc kubenswrapper[4706]: E1206 05:22:36.304949 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:36.804930352 +0000 UTC m=+179.132754296 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.305306 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fac9e7cf-4919-4a48-b314-f9b985397e7e-catalog-content\") pod \"community-operators-9hq8t\" (UID: \"fac9e7cf-4919-4a48-b314-f9b985397e7e\") " pod="openshift-marketplace/community-operators-9hq8t" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.308770 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fac9e7cf-4919-4a48-b314-f9b985397e7e-utilities\") pod \"community-operators-9hq8t\" (UID: \"fac9e7cf-4919-4a48-b314-f9b985397e7e\") " pod="openshift-marketplace/community-operators-9hq8t" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.309147 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e6f7aa9-bbf3-4160-9eb8-e7d54c354202-catalog-content\") pod \"certified-operators-j5lbl\" (UID: \"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202\") " pod="openshift-marketplace/certified-operators-j5lbl" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.309902 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e6f7aa9-bbf3-4160-9eb8-e7d54c354202-utilities\") pod \"certified-operators-j5lbl\" (UID: \"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202\") " pod="openshift-marketplace/certified-operators-j5lbl" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.344248 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkbvx\" (UniqueName: \"kubernetes.io/projected/fac9e7cf-4919-4a48-b314-f9b985397e7e-kube-api-access-wkbvx\") pod \"community-operators-9hq8t\" (UID: \"fac9e7cf-4919-4a48-b314-f9b985397e7e\") " pod="openshift-marketplace/community-operators-9hq8t" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.348071 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wttgm\" (UniqueName: \"kubernetes.io/projected/5e6f7aa9-bbf3-4160-9eb8-e7d54c354202-kube-api-access-wttgm\") pod \"certified-operators-j5lbl\" (UID: \"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202\") " pod="openshift-marketplace/certified-operators-j5lbl" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.370985 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-98kq2"] Dec 06 05:22:36 crc kubenswrapper[4706]: W1206 05:22:36.389965 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2985a55d_3af2_4dd6_adde_7714459e08c3.slice/crio-50065bd8f40f80eb560942fc41aa0793235313ff93ad9c241e8d8f6c2c6cad2f WatchSource:0}: Error finding container 50065bd8f40f80eb560942fc41aa0793235313ff93ad9c241e8d8f6c2c6cad2f: Status 404 returned error can't find the container with id 50065bd8f40f80eb560942fc41aa0793235313ff93ad9c241e8d8f6c2c6cad2f Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 
05:22:36.405990 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/514a779d-1633-49f5-a991-5a80d8714c19-catalog-content\") pod \"certified-operators-svf4w\" (UID: \"514a779d-1633-49f5-a991-5a80d8714c19\") " pod="openshift-marketplace/certified-operators-svf4w" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.406037 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/514a779d-1633-49f5-a991-5a80d8714c19-utilities\") pod \"certified-operators-svf4w\" (UID: \"514a779d-1633-49f5-a991-5a80d8714c19\") " pod="openshift-marketplace/certified-operators-svf4w" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.406075 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnjm6\" (UniqueName: \"kubernetes.io/projected/514a779d-1633-49f5-a991-5a80d8714c19-kube-api-access-rnjm6\") pod \"certified-operators-svf4w\" (UID: \"514a779d-1633-49f5-a991-5a80d8714c19\") " pod="openshift-marketplace/certified-operators-svf4w" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.406119 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:36 crc kubenswrapper[4706]: E1206 05:22:36.406576 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:36.906558874 +0000 UTC m=+179.234382818 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.407020 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/514a779d-1633-49f5-a991-5a80d8714c19-catalog-content\") pod \"certified-operators-svf4w\" (UID: \"514a779d-1633-49f5-a991-5a80d8714c19\") " pod="openshift-marketplace/certified-operators-svf4w" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.407260 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/514a779d-1633-49f5-a991-5a80d8714c19-utilities\") pod \"certified-operators-svf4w\" (UID: \"514a779d-1633-49f5-a991-5a80d8714c19\") " pod="openshift-marketplace/certified-operators-svf4w" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.465978 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnjm6\" (UniqueName: \"kubernetes.io/projected/514a779d-1633-49f5-a991-5a80d8714c19-kube-api-access-rnjm6\") pod \"certified-operators-svf4w\" (UID: \"514a779d-1633-49f5-a991-5a80d8714c19\") " pod="openshift-marketplace/certified-operators-svf4w" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.507767 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:36 crc kubenswrapper[4706]: E1206 05:22:36.508074 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:37.008006591 +0000 UTC m=+179.335830535 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.508192 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:36 crc kubenswrapper[4706]: E1206 05:22:36.509374 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:37.009342788 +0000 UTC m=+179.337166732 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.518691 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9hq8t" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.527471 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j5lbl" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.598865 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-svf4w" Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.612896 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:36 crc kubenswrapper[4706]: E1206 05:22:36.613339 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:37.113321853 +0000 UTC m=+179.441145797 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.659344 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 06 05:22:36 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld
Dec 06 05:22:36 crc kubenswrapper[4706]: [+]process-running ok
Dec 06 05:22:36 crc kubenswrapper[4706]: healthz check failed
Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.659424 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.715583 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:36 crc kubenswrapper[4706]: E1206 05:22:36.716081 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:37.216065486 +0000 UTC m=+179.543889430 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.805085 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9hq8t"]
Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.818553 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:36 crc kubenswrapper[4706]: E1206 05:22:36.818970 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:37.318952311 +0000 UTC m=+179.646776255 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.828665 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j5lbl"]
Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.920191 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:36 crc kubenswrapper[4706]: E1206 05:22:36.920601 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:37.420586894 +0000 UTC m=+179.748410828 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:36 crc kubenswrapper[4706]: I1206 05:22:36.940525 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-svf4w"]
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.009408 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9hq8t" event={"ID":"fac9e7cf-4919-4a48-b314-f9b985397e7e","Type":"ContainerStarted","Data":"7e4d892a7209523964ebbed9009156c22bcf9790f50d3447d7d3a433d4fdc750"}
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.010410 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j5lbl" event={"ID":"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202","Type":"ContainerStarted","Data":"9de2f95bfc2b2a567aaa56a74eb7dcf1e157287d8d82bf197c8c930199456ca8"}
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.013034 4706 generic.go:334] "Generic (PLEG): container finished" podID="f05088c1-1548-4c56-8e14-3610540dec5c" containerID="6f6354cc72b17690ba79356c557d85d14f707789317a419784f544d24a0fdaab" exitCode=0
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.013163 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" event={"ID":"f05088c1-1548-4c56-8e14-3610540dec5c","Type":"ContainerDied","Data":"6f6354cc72b17690ba79356c557d85d14f707789317a419784f544d24a0fdaab"}
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.015829 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jk2pn" event={"ID":"971d28d8-7a3b-4af0-a3e3-9ee9468dbca5","Type":"ContainerStarted","Data":"4d249ae430046e146d8d4d018d020406c70941f542eaa1422d95711c36bb66b8"}
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.017594 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-98kq2" event={"ID":"2985a55d-3af2-4dd6-adde-7714459e08c3","Type":"ContainerStarted","Data":"50065bd8f40f80eb560942fc41aa0793235313ff93ad9c241e8d8f6c2c6cad2f"}
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.021165 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:37 crc kubenswrapper[4706]: E1206 05:22:37.021371 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:37.521348613 +0000 UTC m=+179.849172557 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.021409 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:37 crc kubenswrapper[4706]: E1206 05:22:37.021725 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:37.521718473 +0000 UTC m=+179.849542407 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.060812 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-qld9m" podStartSLOduration=151.060783227 podStartE2EDuration="2m31.060783227s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:37.039925114 +0000 UTC m=+179.367749068" watchObservedRunningTime="2025-12-06 05:22:37.060783227 +0000 UTC m=+179.388607171"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.061135 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-wdsds" podStartSLOduration=151.061130836 podStartE2EDuration="2m31.061130836s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:37.057755765 +0000 UTC m=+179.385579709" watchObservedRunningTime="2025-12-06 05:22:37.061130836 +0000 UTC m=+179.388954780"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.085971 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hggv7" podStartSLOduration=151.085949566 podStartE2EDuration="2m31.085949566s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:37.078398392 +0000 UTC m=+179.406222336" watchObservedRunningTime="2025-12-06 05:22:37.085949566 +0000 UTC m=+179.413773520"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.103262 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-pzbsr" podStartSLOduration=151.103240822 podStartE2EDuration="2m31.103240822s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:37.100870018 +0000 UTC m=+179.428693982" watchObservedRunningTime="2025-12-06 05:22:37.103240822 +0000 UTC m=+179.431064766"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.120632 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-5tbsf" podStartSLOduration=151.120607371 podStartE2EDuration="2m31.120607371s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:37.12057921 +0000 UTC m=+179.448403154" watchObservedRunningTime="2025-12-06 05:22:37.120607371 +0000 UTC m=+179.448431315"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.122252 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:37 crc kubenswrapper[4706]: E1206 05:22:37.123243 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:37.623223271 +0000 UTC m=+179.951047215 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.146651 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-5g2s4" podStartSLOduration=151.146622252 podStartE2EDuration="2m31.146622252s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:37.14393549 +0000 UTC m=+179.471759454" watchObservedRunningTime="2025-12-06 05:22:37.146622252 +0000 UTC m=+179.474446196"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.224768 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:37 crc kubenswrapper[4706]: E1206 05:22:37.225169 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:37.725147821 +0000 UTC m=+180.052971765 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.326427 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:37 crc kubenswrapper[4706]: E1206 05:22:37.326834 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:37.826788524 +0000 UTC m=+180.154612498 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.428607 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:37 crc kubenswrapper[4706]: E1206 05:22:37.429205 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:37.929172226 +0000 UTC m=+180.256996200 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:37 crc kubenswrapper[4706]: W1206 05:22:37.465444 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod514a779d_1633_49f5_a991_5a80d8714c19.slice/crio-856b38db7af56babe4f10b056895e61e898422e885e971b94b226a4d3bb9745c WatchSource:0}: Error finding container 856b38db7af56babe4f10b056895e61e898422e885e971b94b226a4d3bb9745c: Status 404 returned error can't find the container with id 856b38db7af56babe4f10b056895e61e898422e885e971b94b226a4d3bb9745c
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.531264 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.531495 4706 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-jslph container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body=
Dec 06 05:22:37 crc kubenswrapper[4706]: E1206 05:22:37.531571 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:38.031538049 +0000 UTC m=+180.359361993 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.531916 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" podUID="6b76376c-f080-4458-a87a-84eab1e4b86d" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.532140 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:37 crc kubenswrapper[4706]: E1206 05:22:37.532522 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:38.032507285 +0000 UTC m=+180.360331229 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.633298 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:37 crc kubenswrapper[4706]: E1206 05:22:37.633888 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:38.133868799 +0000 UTC m=+180.461692743 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.640760 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-l9d42"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.645256 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 06 05:22:37 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld
Dec 06 05:22:37 crc kubenswrapper[4706]: [+]process-running ok
Dec 06 05:22:37 crc kubenswrapper[4706]: healthz check failed
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.645307 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.653002 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hggv7"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.676273 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-hggv7"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.695281 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-t4xd8" podStartSLOduration=151.695261646 podStartE2EDuration="2m31.695261646s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:37.171593096 +0000 UTC m=+179.499417040" watchObservedRunningTime="2025-12-06 05:22:37.695261646 +0000 UTC m=+180.023085590"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.735245 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:37 crc kubenswrapper[4706]: E1206 05:22:37.735682 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:38.235668897 +0000 UTC m=+180.563492841 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.837327 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:37 crc kubenswrapper[4706]: E1206 05:22:37.837681 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:38.337659038 +0000 UTC m=+180.665482992 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.854426 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-t4xd8"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.854484 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-t4xd8"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.857570 4706 patch_prober.go:28] interesting pod/console-f9d7485db-t4xd8 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body=
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.857642 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-t4xd8" podUID="ed24741b-5476-4f20-bd17-4c8686d40419" containerName="console" probeResult="failure" output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.868629 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wx94f"]
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.869672 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wx94f"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.871631 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.884480 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wx94f"]
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.939538 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.939619 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/404f2b83-1030-4b10-b1cf-c7db67aae01f-utilities\") pod \"redhat-marketplace-wx94f\" (UID: \"404f2b83-1030-4b10-b1cf-c7db67aae01f\") " pod="openshift-marketplace/redhat-marketplace-wx94f"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.939674 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44ghq\" (UniqueName: \"kubernetes.io/projected/404f2b83-1030-4b10-b1cf-c7db67aae01f-kube-api-access-44ghq\") pod \"redhat-marketplace-wx94f\" (UID: \"404f2b83-1030-4b10-b1cf-c7db67aae01f\") " pod="openshift-marketplace/redhat-marketplace-wx94f"
Dec 06 05:22:37 crc kubenswrapper[4706]: I1206 05:22:37.939737 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/404f2b83-1030-4b10-b1cf-c7db67aae01f-catalog-content\") pod \"redhat-marketplace-wx94f\" (UID: \"404f2b83-1030-4b10-b1cf-c7db67aae01f\") " pod="openshift-marketplace/redhat-marketplace-wx94f"
Dec 06 05:22:37 crc kubenswrapper[4706]: E1206 05:22:37.940021 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:38.43999495 +0000 UTC m=+180.767819074 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.031936 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" event={"ID":"f4065785-c72e-4c45-ab51-ce292be4f2ed","Type":"ContainerStarted","Data":"036f6ab00f909a4341bbe03e9573255d212d0f3cd815819a6f0496ef30222834"}
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.034553 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-db62k" event={"ID":"33133042-30b9-487e-8ee4-097e0faf7673","Type":"ContainerStarted","Data":"b3df0b705744b3d625f354293b7d2b7c3697f217aad409525feb199433df4637"}
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.041715 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:38 crc kubenswrapper[4706]: E1206 05:22:38.041875 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:38.541841998 +0000 UTC m=+180.869665942 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.042021 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/404f2b83-1030-4b10-b1cf-c7db67aae01f-utilities\") pod \"redhat-marketplace-wx94f\" (UID: \"404f2b83-1030-4b10-b1cf-c7db67aae01f\") " pod="openshift-marketplace/redhat-marketplace-wx94f"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.042076 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44ghq\" (UniqueName: \"kubernetes.io/projected/404f2b83-1030-4b10-b1cf-c7db67aae01f-kube-api-access-44ghq\") pod \"redhat-marketplace-wx94f\" (UID: \"404f2b83-1030-4b10-b1cf-c7db67aae01f\") " pod="openshift-marketplace/redhat-marketplace-wx94f"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.042179 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/404f2b83-1030-4b10-b1cf-c7db67aae01f-catalog-content\") pod \"redhat-marketplace-wx94f\" (UID: \"404f2b83-1030-4b10-b1cf-c7db67aae01f\") " pod="openshift-marketplace/redhat-marketplace-wx94f"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.042224 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:38 crc kubenswrapper[4706]: E1206 05:22:38.042583 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:38.542567497 +0000 UTC m=+180.870391441 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.042663 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/404f2b83-1030-4b10-b1cf-c7db67aae01f-utilities\") pod \"redhat-marketplace-wx94f\" (UID: \"404f2b83-1030-4b10-b1cf-c7db67aae01f\") " pod="openshift-marketplace/redhat-marketplace-wx94f"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.042993 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/404f2b83-1030-4b10-b1cf-c7db67aae01f-catalog-content\") pod \"redhat-marketplace-wx94f\" (UID: \"404f2b83-1030-4b10-b1cf-c7db67aae01f\") " pod="openshift-marketplace/redhat-marketplace-wx94f"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.066339 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-k8g95" event={"ID":"445fbc3d-3a2f-4361-8444-badce4d8e564","Type":"ContainerStarted","Data":"4bbf3e2edd91253ec3c45c510ca44204646e6a7d1e178fa740aefea43cd8a174"}
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.066764 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-5n589" event={"ID":"58a1210d-91bd-4a47-b70a-c8026a238565","Type":"ContainerStarted","Data":"25099135ded45a476f7abaa14ae1747c1cda86990f0816a5f380b81ee6d4a2cb"}
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.066780 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-qwdgg" event={"ID":"28ae28d5-433c-4ce7-bb6e-2532d65b354d","Type":"ContainerStarted","Data":"f304ca7a86585bfabd623b35089102860248b474a321ce73ed25d7683bd7463e"}
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.066792 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-svf4w" event={"ID":"514a779d-1633-49f5-a991-5a80d8714c19","Type":"ContainerStarted","Data":"856b38db7af56babe4f10b056895e61e898422e885e971b94b226a4d3bb9745c"}
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.074942 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44ghq\" (UniqueName: \"kubernetes.io/projected/404f2b83-1030-4b10-b1cf-c7db67aae01f-kube-api-access-44ghq\") pod \"redhat-marketplace-wx94f\" (UID: \"404f2b83-1030-4b10-b1cf-c7db67aae01f\") " pod="openshift-marketplace/redhat-marketplace-wx94f"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.142938 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:38 crc kubenswrapper[4706]: E1206 05:22:38.148715 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:38.64868653 +0000 UTC m=+180.976510474 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.185248 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.194098 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wx94f"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.245177 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:38 crc kubenswrapper[4706]: E1206 05:22:38.245994 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:38.745979646 +0000 UTC m=+181.073803590 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.274724 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dwthx"]
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.275829 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dwthx"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.289542 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-pzbsr"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.295647 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwthx"]
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.297580 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.301272 4706 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-xptzp container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body=
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.301345 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" podUID="e9405376-0114-4bee-b245-f17b30f2594a" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.301835 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-pzbsr"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.302201 4706 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-xptzp container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body=
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.302260 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" podUID="e9405376-0114-4bee-b245-f17b30f2594a" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.347114 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.347507 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ngsgc\" (UniqueName: \"kubernetes.io/projected/cf5c1feb-f09b-41c2-9974-56538ccc281f-kube-api-access-ngsgc\") pod \"redhat-marketplace-dwthx\" (UID: \"cf5c1feb-f09b-41c2-9974-56538ccc281f\") " pod="openshift-marketplace/redhat-marketplace-dwthx"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.347533 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf5c1feb-f09b-41c2-9974-56538ccc281f-utilities\") pod \"redhat-marketplace-dwthx\" (UID: \"cf5c1feb-f09b-41c2-9974-56538ccc281f\") " pod="openshift-marketplace/redhat-marketplace-dwthx"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.347561 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf5c1feb-f09b-41c2-9974-56538ccc281f-catalog-content\") pod \"redhat-marketplace-dwthx\" (UID: \"cf5c1feb-f09b-41c2-9974-56538ccc281f\") " pod="openshift-marketplace/redhat-marketplace-dwthx"
Dec 06 05:22:38 crc kubenswrapper[4706]: E1206 05:22:38.347655 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:38.847637868 +0000 UTC m=+181.175461812 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.449388 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ngsgc\" (UniqueName: \"kubernetes.io/projected/cf5c1feb-f09b-41c2-9974-56538ccc281f-kube-api-access-ngsgc\") pod \"redhat-marketplace-dwthx\" (UID: \"cf5c1feb-f09b-41c2-9974-56538ccc281f\") " pod="openshift-marketplace/redhat-marketplace-dwthx"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.449866 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf5c1feb-f09b-41c2-9974-56538ccc281f-utilities\") pod \"redhat-marketplace-dwthx\" (UID: \"cf5c1feb-f09b-41c2-9974-56538ccc281f\") " pod="openshift-marketplace/redhat-marketplace-dwthx"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.449903 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf5c1feb-f09b-41c2-9974-56538ccc281f-catalog-content\") pod \"redhat-marketplace-dwthx\" (UID: \"cf5c1feb-f09b-41c2-9974-56538ccc281f\") " pod="openshift-marketplace/redhat-marketplace-dwthx"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.449949 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:38 crc kubenswrapper[4706]: E1206 05:22:38.450457 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:38.950430672 +0000 UTC m=+181.278254616 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.450956 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf5c1feb-f09b-41c2-9974-56538ccc281f-utilities\") pod \"redhat-marketplace-dwthx\" (UID: \"cf5c1feb-f09b-41c2-9974-56538ccc281f\") " pod="openshift-marketplace/redhat-marketplace-dwthx"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.451226 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf5c1feb-f09b-41c2-9974-56538ccc281f-catalog-content\") pod \"redhat-marketplace-dwthx\" (UID: \"cf5c1feb-f09b-41c2-9974-56538ccc281f\") " pod="openshift-marketplace/redhat-marketplace-dwthx"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.481623 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ngsgc\" (UniqueName: \"kubernetes.io/projected/cf5c1feb-f09b-41c2-9974-56538ccc281f-kube-api-access-ngsgc\") pod \"redhat-marketplace-dwthx\" (UID: \"cf5c1feb-f09b-41c2-9974-56538ccc281f\") " pod="openshift-marketplace/redhat-marketplace-dwthx"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.536651 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wx94f"]
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.551393 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:38 crc kubenswrapper[4706]: E1206 05:22:38.551611 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:39.051566281 +0000 UTC m=+181.379390225 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.553513 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:38 crc kubenswrapper[4706]: E1206 05:22:38.553998 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:39.053973956 +0000 UTC m=+181.381797900 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.626411 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dwthx"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.657571 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:38 crc kubenswrapper[4706]: E1206 05:22:38.658113 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:39.158091775 +0000 UTC m=+181.485915719 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.664863 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 06 05:22:38 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld
Dec 06 05:22:38 crc kubenswrapper[4706]: [+]process-running ok
Dec 06 05:22:38 crc kubenswrapper[4706]: healthz check failed
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.664909 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.680402 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-9gzzw" podStartSLOduration=152.680382637 podStartE2EDuration="2m32.680382637s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:38.679329228 +0000 UTC m=+181.007153172" watchObservedRunningTime="2025-12-06 05:22:38.680382637 +0000 UTC m=+181.008206581"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.706830 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" podStartSLOduration=152.706805849 podStartE2EDuration="2m32.706805849s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:38.704715363 +0000 UTC m=+181.032539317" watchObservedRunningTime="2025-12-06 05:22:38.706805849 +0000 UTC m=+181.034629793"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.760200 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:38 crc kubenswrapper[4706]: E1206 05:22:38.760722 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:39.260699963 +0000 UTC m=+181.588523907 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.765985 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-n6bpb" podStartSLOduration=153.765937264 podStartE2EDuration="2m33.765937264s" podCreationTimestamp="2025-12-06 05:20:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:38.755433832 +0000 UTC m=+181.083257786" watchObservedRunningTime="2025-12-06 05:22:38.765937264 +0000 UTC m=+181.093761208"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.810850 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59" podStartSLOduration=152.810829786 podStartE2EDuration="2m32.810829786s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:38.807562728 +0000 UTC m=+181.135386682" watchObservedRunningTime="2025-12-06 05:22:38.810829786 +0000 UTC m=+181.138653730"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.861935 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:38 crc kubenswrapper[4706]: E1206 05:22:38.862193 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:39.362158141 +0000 UTC m=+181.689982085 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.862587 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:38 crc kubenswrapper[4706]: E1206 05:22:38.863158 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:39.363134767 +0000 UTC m=+181.690958711 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.867672 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-j4frb"]
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.869372 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j4frb"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.871963 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.889146 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j4frb"]
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.960328 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwthx"]
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.964093 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:38 crc kubenswrapper[4706]: E1206 05:22:38.964350 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:39.464312227 +0000 UTC m=+181.792136171 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.964449 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d21abc6-d736-47df-8eac-4dee0691a92c-catalog-content\") pod \"redhat-operators-j4frb\" (UID: \"2d21abc6-d736-47df-8eac-4dee0691a92c\") " pod="openshift-marketplace/redhat-operators-j4frb"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.964652 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.964832 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d21abc6-d736-47df-8eac-4dee0691a92c-utilities\") pod \"redhat-operators-j4frb\" (UID: \"2d21abc6-d736-47df-8eac-4dee0691a92c\") " pod="openshift-marketplace/redhat-operators-j4frb"
Dec 06 05:22:38 crc kubenswrapper[4706]: I1206 05:22:38.964927 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tkt56\" (UniqueName: \"kubernetes.io/projected/2d21abc6-d736-47df-8eac-4dee0691a92c-kube-api-access-tkt56\") pod \"redhat-operators-j4frb\" (UID: \"2d21abc6-d736-47df-8eac-4dee0691a92c\") " pod="openshift-marketplace/redhat-operators-j4frb"
Dec 06 05:22:38 crc kubenswrapper[4706]: E1206 05:22:38.965537 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:39.46551475 +0000 UTC m=+181.793338694 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:38 crc kubenswrapper[4706]: W1206 05:22:38.967861 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcf5c1feb_f09b_41c2_9974_56538ccc281f.slice/crio-9bed54aa84ef664d23bd59f8630b572457303dfcf83bdee0fd2fc918869dd22d WatchSource:0}: Error finding container 9bed54aa84ef664d23bd59f8630b572457303dfcf83bdee0fd2fc918869dd22d: Status 404 returned error can't find the container with id 9bed54aa84ef664d23bd59f8630b572457303dfcf83bdee0fd2fc918869dd22d
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.066549 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:39 crc kubenswrapper[4706]: E1206 05:22:39.066739 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:39.56668567 +0000 UTC m=+181.894509624 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.066942 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d21abc6-d736-47df-8eac-4dee0691a92c-catalog-content\") pod \"redhat-operators-j4frb\" (UID: \"2d21abc6-d736-47df-8eac-4dee0691a92c\") " pod="openshift-marketplace/redhat-operators-j4frb"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.067083 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.067219 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d21abc6-d736-47df-8eac-4dee0691a92c-utilities\") pod \"redhat-operators-j4frb\" (UID: \"2d21abc6-d736-47df-8eac-4dee0691a92c\") " pod="openshift-marketplace/redhat-operators-j4frb"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.067307 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tkt56\" (UniqueName: \"kubernetes.io/projected/2d21abc6-d736-47df-8eac-4dee0691a92c-kube-api-access-tkt56\") pod \"redhat-operators-j4frb\" (UID: \"2d21abc6-d736-47df-8eac-4dee0691a92c\") " pod="openshift-marketplace/redhat-operators-j4frb"
Dec 06 05:22:39 crc kubenswrapper[4706]: E1206 05:22:39.067475 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:39.56745828 +0000 UTC m=+181.895282224 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.067968 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d21abc6-d736-47df-8eac-4dee0691a92c-utilities\") pod \"redhat-operators-j4frb\" (UID: \"2d21abc6-d736-47df-8eac-4dee0691a92c\") " pod="openshift-marketplace/redhat-operators-j4frb"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.068937 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwthx" event={"ID":"cf5c1feb-f09b-41c2-9974-56538ccc281f","Type":"ContainerStarted","Data":"9bed54aa84ef664d23bd59f8630b572457303dfcf83bdee0fd2fc918869dd22d"}
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.069682 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d21abc6-d736-47df-8eac-4dee0691a92c-catalog-content\") pod \"redhat-operators-j4frb\" (UID: \"2d21abc6-d736-47df-8eac-4dee0691a92c\") " pod="openshift-marketplace/redhat-operators-j4frb"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.070755 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wx94f" event={"ID":"404f2b83-1030-4b10-b1cf-c7db67aae01f","Type":"ContainerStarted","Data":"0ef2b4d64cbdd444c63d782b02b7af1aa85fc9fbc22031a69432b324ee37d2ca"}
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.071642 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-6xms4"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.071687 4706 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-xptzp container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body=
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.071750 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" podUID="e9405376-0114-4bee-b245-f17b30f2594a" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.073961 4706 patch_prober.go:28] interesting pod/downloads-7954f5f757-6xms4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body=
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.074096 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-6xms4" podUID="190e4233-a97e-4af7-8e7e-d66ccf827546" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.39:8080/\": dial tcp 10.217.0.39:8080: connect: connection refused"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.089631 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-6xms4" podStartSLOduration=153.089600908 podStartE2EDuration="2m33.089600908s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:39.085772205 +0000 UTC m=+181.413596199" watchObservedRunningTime="2025-12-06 05:22:39.089600908 +0000 UTC m=+181.417424862"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.093747 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tkt56\" (UniqueName: \"kubernetes.io/projected/2d21abc6-d736-47df-8eac-4dee0691a92c-kube-api-access-tkt56\") pod \"redhat-operators-j4frb\" (UID: \"2d21abc6-d736-47df-8eac-4dee0691a92c\") " pod="openshift-marketplace/redhat-operators-j4frb"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.102412 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-bx4tb" podStartSLOduration=15.102390773 podStartE2EDuration="15.102390773s" podCreationTimestamp="2025-12-06 05:22:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:39.099938017 +0000 UTC m=+181.427762011" watchObservedRunningTime="2025-12-06 05:22:39.102390773 +0000 UTC m=+181.430214717"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.126720 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" podStartSLOduration=154.126699129 podStartE2EDuration="2m34.126699129s" podCreationTimestamp="2025-12-06 05:20:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:39.122606979 +0000 UTC m=+181.450430953" watchObservedRunningTime="2025-12-06 05:22:39.126699129 +0000 UTC m=+181.454523073"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.142120 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-kcwkb" podStartSLOduration=153.142091455 podStartE2EDuration="2m33.142091455s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:39.139285009 +0000 UTC m=+181.467108973" watchObservedRunningTime="2025-12-06 05:22:39.142091455 +0000 UTC m=+181.469915409"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.168405 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jgqp9" podStartSLOduration=153.168375203 podStartE2EDuration="2m33.168375203s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:39.16452917 +0000 UTC m=+181.492353124" watchObservedRunningTime="2025-12-06 05:22:39.168375203 +0000 UTC m=+181.496199157"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.169251 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:39 crc kubenswrapper[4706]: E1206 05:22:39.169475 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:39.669444042 +0000 UTC m=+181.997267996 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.170404 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:39 crc kubenswrapper[4706]: E1206 05:22:39.178315 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:39.678295432 +0000 UTC m=+182.006119376 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.185414 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j4frb"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.215126 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s" podStartSLOduration=154.215101175 podStartE2EDuration="2m34.215101175s" podCreationTimestamp="2025-12-06 05:20:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:39.214297632 +0000 UTC m=+181.542121586" watchObservedRunningTime="2025-12-06 05:22:39.215101175 +0000 UTC m=+181.542925139"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.273647 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-r2clj"]
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.275658 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r2clj"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.275913 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:39 crc kubenswrapper[4706]: E1206 05:22:39.277041 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:39.777020055 +0000 UTC m=+182.104843999 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.304728 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r2clj"]
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.379012 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.379497 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52e328e7-19c9-4412-96f0-582cd5add7c5-catalog-content\") pod \"redhat-operators-r2clj\" (UID: \"52e328e7-19c9-4412-96f0-582cd5add7c5\") " pod="openshift-marketplace/redhat-operators-r2clj"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.379552 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89xk8\" (UniqueName: \"kubernetes.io/projected/52e328e7-19c9-4412-96f0-582cd5add7c5-kube-api-access-89xk8\") pod \"redhat-operators-r2clj\" (UID: \"52e328e7-19c9-4412-96f0-582cd5add7c5\") " pod="openshift-marketplace/redhat-operators-r2clj"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.379608 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52e328e7-19c9-4412-96f0-582cd5add7c5-utilities\") pod \"redhat-operators-r2clj\" (UID: \"52e328e7-19c9-4412-96f0-582cd5add7c5\") " pod="openshift-marketplace/redhat-operators-r2clj"
Dec 06 05:22:39 crc kubenswrapper[4706]: E1206 05:22:39.379684 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:39.879644153 +0000 UTC m=+182.207468087 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.459340 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j4frb"] Dec 06 05:22:39 crc kubenswrapper[4706]: W1206 05:22:39.469862 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2d21abc6_d736_47df_8eac_4dee0691a92c.slice/crio-ceb960ccff7e4638e02d6ce722ec332bae7b6006917dec1a7b92ae581d4d2497 WatchSource:0}: Error finding container ceb960ccff7e4638e02d6ce722ec332bae7b6006917dec1a7b92ae581d4d2497: Status 404 returned error can't find the container with id ceb960ccff7e4638e02d6ce722ec332bae7b6006917dec1a7b92ae581d4d2497 Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.484138 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:39 crc kubenswrapper[4706]: E1206 05:22:39.484183 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:39.984159664 +0000 UTC m=+182.311983608 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.484639 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52e328e7-19c9-4412-96f0-582cd5add7c5-utilities\") pod \"redhat-operators-r2clj\" (UID: \"52e328e7-19c9-4412-96f0-582cd5add7c5\") " pod="openshift-marketplace/redhat-operators-r2clj" Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.484862 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.484894 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52e328e7-19c9-4412-96f0-582cd5add7c5-catalog-content\") pod \"redhat-operators-r2clj\" (UID: \"52e328e7-19c9-4412-96f0-582cd5add7c5\") " pod="openshift-marketplace/redhat-operators-r2clj" Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.485035 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89xk8\" (UniqueName: \"kubernetes.io/projected/52e328e7-19c9-4412-96f0-582cd5add7c5-kube-api-access-89xk8\") pod \"redhat-operators-r2clj\" (UID: \"52e328e7-19c9-4412-96f0-582cd5add7c5\") " pod="openshift-marketplace/redhat-operators-r2clj" Dec 06 05:22:39 crc kubenswrapper[4706]: E1206 05:22:39.485380 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:39.985352926 +0000 UTC m=+182.313176870 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.486036 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52e328e7-19c9-4412-96f0-582cd5add7c5-utilities\") pod \"redhat-operators-r2clj\" (UID: \"52e328e7-19c9-4412-96f0-582cd5add7c5\") " pod="openshift-marketplace/redhat-operators-r2clj"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.486142 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52e328e7-19c9-4412-96f0-582cd5add7c5-catalog-content\") pod \"redhat-operators-r2clj\" (UID: \"52e328e7-19c9-4412-96f0-582cd5add7c5\") " pod="openshift-marketplace/redhat-operators-r2clj"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.503675 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89xk8\" (UniqueName: \"kubernetes.io/projected/52e328e7-19c9-4412-96f0-582cd5add7c5-kube-api-access-89xk8\") pod \"redhat-operators-r2clj\" (UID: \"52e328e7-19c9-4412-96f0-582cd5add7c5\") " pod="openshift-marketplace/redhat-operators-r2clj"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.586519 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:39 crc kubenswrapper[4706]: E1206 05:22:39.586736 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:40.086670049 +0000 UTC m=+182.414493993 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.586809 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:39 crc kubenswrapper[4706]: E1206 05:22:39.587171 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:40.087163733 +0000 UTC m=+182.414987677 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.607466 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r2clj"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.646852 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 06 05:22:39 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld
Dec 06 05:22:39 crc kubenswrapper[4706]: [+]process-running ok
Dec 06 05:22:39 crc kubenswrapper[4706]: healthz check failed
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.646934 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.688584 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:39 crc kubenswrapper[4706]: E1206 05:22:39.688971 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:40.18894922 +0000 UTC m=+182.516773164 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.791897 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:39 crc kubenswrapper[4706]: E1206 05:22:39.792295 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:40.292281128 +0000 UTC m=+182.620105072 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.896485 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:39 crc kubenswrapper[4706]: E1206 05:22:39.896668 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:40.396626423 +0000 UTC m=+182.724450387 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.896987 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:39 crc kubenswrapper[4706]: E1206 05:22:39.897544 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:40.397528687 +0000 UTC m=+182.725352851 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.993246 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r2clj"] Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.998867 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:39 crc kubenswrapper[4706]: E1206 05:22:39.999116 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:40.499084878 +0000 UTC m=+182.826908832 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:39 crc kubenswrapper[4706]: I1206 05:22:39.999407 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:40 crc kubenswrapper[4706]: E1206 05:22:40.000160 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:40.500071434 +0000 UTC m=+182.827895408 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:40 crc kubenswrapper[4706]: W1206 05:22:40.003197 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod52e328e7_19c9_4412_96f0_582cd5add7c5.slice/crio-2f7012bed2d7108013af6192deca337367240d4c707d579a0fdcfa92d0671509 WatchSource:0}: Error finding container 2f7012bed2d7108013af6192deca337367240d4c707d579a0fdcfa92d0671509: Status 404 returned error can't find the container with id 2f7012bed2d7108013af6192deca337367240d4c707d579a0fdcfa92d0671509 Dec 06 05:22:40 crc kubenswrapper[4706]: I1206 05:22:40.077339 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j4frb" event={"ID":"2d21abc6-d736-47df-8eac-4dee0691a92c","Type":"ContainerStarted","Data":"ceb960ccff7e4638e02d6ce722ec332bae7b6006917dec1a7b92ae581d4d2497"} Dec 06 05:22:40 crc kubenswrapper[4706]: I1206 05:22:40.079367 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-98kq2" event={"ID":"2985a55d-3af2-4dd6-adde-7714459e08c3","Type":"ContainerStarted","Data":"2437e439b6f7daf1b036314ee54767cb3812b9bdce40f622d96a382a98f8e597"} Dec 06 05:22:40 crc kubenswrapper[4706]: I1206 05:22:40.081664 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2clj" event={"ID":"52e328e7-19c9-4412-96f0-582cd5add7c5","Type":"ContainerStarted","Data":"2f7012bed2d7108013af6192deca337367240d4c707d579a0fdcfa92d0671509"} Dec 06 05:22:40 crc kubenswrapper[4706]: I1206 05:22:40.082454 4706 patch_prober.go:28] interesting pod/downloads-7954f5f757-6xms4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body= Dec 06 05:22:40 crc kubenswrapper[4706]: I1206 05:22:40.082527 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-6xms4" podUID="190e4233-a97e-4af7-8e7e-d66ccf827546" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.39:8080/\": dial tcp 10.217.0.39:8080: connect: connection refused" Dec 06 05:22:40 crc kubenswrapper[4706]: I1206 05:22:40.100669 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:40 crc kubenswrapper[4706]: E1206 05:22:40.101328 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:40.601294475 +0000 UTC m=+182.929118419 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:40 crc kubenswrapper[4706]: I1206 05:22:40.202547 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:40 crc kubenswrapper[4706]: E1206 05:22:40.203194 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:40.703175264 +0000 UTC m=+183.030999208 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:40 crc kubenswrapper[4706]: I1206 05:22:40.304232 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:40 crc kubenswrapper[4706]: E1206 05:22:40.304459 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:40.804426317 +0000 UTC m=+183.132250261 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:40 crc kubenswrapper[4706]: I1206 05:22:40.304519 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:40 crc kubenswrapper[4706]: E1206 05:22:40.305160 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:40.805148816 +0000 UTC m=+183.132972760 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:40 crc kubenswrapper[4706]: I1206 05:22:40.405847 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:40 crc kubenswrapper[4706]: E1206 05:22:40.406207 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:40.906189492 +0000 UTC m=+183.234013436 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:40 crc kubenswrapper[4706]: I1206 05:22:40.507296 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:40 crc kubenswrapper[4706]: E1206 05:22:40.507866 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:41.007833684 +0000 UTC m=+183.335657818 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:40 crc kubenswrapper[4706]: I1206 05:22:40.608075 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:40 crc kubenswrapper[4706]: E1206 05:22:40.608375 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:41.108340466 +0000 UTC m=+183.436164420 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:40 crc kubenswrapper[4706]: I1206 05:22:40.643357 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 06 05:22:40 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld Dec 06 05:22:40 crc kubenswrapper[4706]: [+]process-running ok Dec 06 05:22:40 crc kubenswrapper[4706]: healthz check failed Dec 06 05:22:40 crc kubenswrapper[4706]: I1206 05:22:40.643439 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 06 05:22:40 crc kubenswrapper[4706]: I1206 05:22:40.709754 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:40 crc kubenswrapper[4706]: E1206 05:22:40.710112 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:41.210096982 +0000 UTC m=+183.537920926 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:40 crc kubenswrapper[4706]: I1206 05:22:40.810994 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:40 crc kubenswrapper[4706]: E1206 05:22:40.812510 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:41.312459404 +0000 UTC m=+183.640283378 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:40 crc kubenswrapper[4706]: I1206 05:22:40.913528 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:40 crc kubenswrapper[4706]: E1206 05:22:40.914253 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:41.414215709 +0000 UTC m=+183.742039693 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.015374 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:41 crc kubenswrapper[4706]: E1206 05:22:41.015655 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:41.515613625 +0000 UTC m=+183.843437599 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.015935 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:41 crc kubenswrapper[4706]: E1206 05:22:41.016467 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:41.516412547 +0000 UTC m=+183.844236521 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.117617 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:41 crc kubenswrapper[4706]: E1206 05:22:41.117881 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:41.617821723 +0000 UTC m=+183.945645727 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.117956 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:41 crc kubenswrapper[4706]: E1206 05:22:41.118581 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:41.618562503 +0000 UTC m=+183.946386517 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.219574 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:41 crc kubenswrapper[4706]: E1206 05:22:41.219828 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:41.719775514 +0000 UTC m=+184.047599498 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.220264 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:41 crc kubenswrapper[4706]: E1206 05:22:41.220706 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:41.720668759 +0000 UTC m=+184.048492703 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.233370 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.234151 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.237963 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.238543 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.248337 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.321396 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.321592 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7d3b0e3c-72f0-41d4-af60-a2f50ea3a040-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"7d3b0e3c-72f0-41d4-af60-a2f50ea3a040\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Dec 06 05:22:41 crc kubenswrapper[4706]: E1206 05:22:41.321624 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:41.821588632 +0000 UTC m=+184.149412616 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.321972 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7d3b0e3c-72f0-41d4-af60-a2f50ea3a040-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"7d3b0e3c-72f0-41d4-af60-a2f50ea3a040\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.423388 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7d3b0e3c-72f0-41d4-af60-a2f50ea3a040-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"7d3b0e3c-72f0-41d4-af60-a2f50ea3a040\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.423452 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.423480 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7d3b0e3c-72f0-41d4-af60-a2f50ea3a040-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"7d3b0e3c-72f0-41d4-af60-a2f50ea3a040\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.423545 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7d3b0e3c-72f0-41d4-af60-a2f50ea3a040-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"7d3b0e3c-72f0-41d4-af60-a2f50ea3a040\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 06 05:22:41 crc kubenswrapper[4706]: E1206 05:22:41.424013 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:41.923989994 +0000 UTC m=+184.251813958 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.458539 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7d3b0e3c-72f0-41d4-af60-a2f50ea3a040-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"7d3b0e3c-72f0-41d4-af60-a2f50ea3a040\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.524829 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:41 crc kubenswrapper[4706]: E1206 05:22:41.525066 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:42.025007 +0000 UTC m=+184.352830944 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.525333 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:41 crc kubenswrapper[4706]: E1206 05:22:41.525814 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:42.025802512 +0000 UTC m=+184.353626456 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.626804 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 06 05:22:41 crc kubenswrapper[4706]: E1206 05:22:41.627216 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:42.127195827 +0000 UTC m=+184.455019771 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.627102 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.628197 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:41 crc kubenswrapper[4706]: E1206 05:22:41.628566 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:42.128557494 +0000 UTC m=+184.456381438 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.645318 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 06 05:22:41 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld Dec 06 05:22:41 crc kubenswrapper[4706]: [+]process-running ok Dec 06 05:22:41 crc kubenswrapper[4706]: healthz check failed Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.645465 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.729840 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:41 crc kubenswrapper[4706]: E1206 05:22:41.730079 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:42.230025711 +0000 UTC m=+184.557849655 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.730146 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:41 crc kubenswrapper[4706]: E1206 05:22:41.730453 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:42.230441223 +0000 UTC m=+184.558265167 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.831859 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:41 crc kubenswrapper[4706]: E1206 05:22:41.832109 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:42.332082965 +0000 UTC m=+184.659906909 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.832501 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:41 crc kubenswrapper[4706]: E1206 05:22:41.832850 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:42.332841326 +0000 UTC m=+184.660665270 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.859405 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.933623 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:41 crc kubenswrapper[4706]: E1206 05:22:41.933881 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:42.433842321 +0000 UTC m=+184.761666275 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:41 crc kubenswrapper[4706]: I1206 05:22:41.934433 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:41 crc kubenswrapper[4706]: E1206 05:22:41.934884 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:42.434872809 +0000 UTC m=+184.762696763 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.035311 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:42 crc kubenswrapper[4706]: E1206 05:22:42.035792 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:42.535772312 +0000 UTC m=+184.863596266 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.105954 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"602cc6c6-054f-46bd-a044-0ed68baa41ad","Type":"ContainerStarted","Data":"37984351dd838e60198b441283b37c70fad224f6193186b6ff143b7a9ac34f5a"} Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.110544 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-jhgqt" event={"ID":"eba39d45-3292-48d5-be72-9f948b5ff2fe","Type":"ContainerStarted","Data":"2f6e79f536d6813edd691957ef1a7604e24e8087aff242f0603dcd0c1226bf53"} Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.115293 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"7d3b0e3c-72f0-41d4-af60-a2f50ea3a040","Type":"ContainerStarted","Data":"d08ee2cb63c034c728b75f7876c07a330e30a5fd39ee9562e01991afe1e0676a"} Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.117815 4706 generic.go:334] "Generic (PLEG): container finished" podID="2985a55d-3af2-4dd6-adde-7714459e08c3" containerID="2437e439b6f7daf1b036314ee54767cb3812b9bdce40f622d96a382a98f8e597" exitCode=0 Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.117851 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-98kq2" event={"ID":"2985a55d-3af2-4dd6-adde-7714459e08c3","Type":"ContainerDied","Data":"2437e439b6f7daf1b036314ee54767cb3812b9bdce40f622d96a382a98f8e597"} Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.137568 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:42 crc kubenswrapper[4706]: E1206 05:22:42.138118 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:42.638094252 +0000 UTC m=+184.965918196 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.239275 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:42 crc kubenswrapper[4706]: E1206 05:22:42.239510 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:42.739471407 +0000 UTC m=+185.067295361 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.239766 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:42 crc kubenswrapper[4706]: E1206 05:22:42.240285 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:42.740261899 +0000 UTC m=+185.068085873 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.341241 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:42 crc kubenswrapper[4706]: E1206 05:22:42.341518 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:42.84148034 +0000 UTC m=+185.169304324 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.341668 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:42 crc kubenswrapper[4706]: E1206 05:22:42.342192 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:42.842174669 +0000 UTC m=+185.169998653 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.474022 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:42 crc kubenswrapper[4706]: E1206 05:22:42.474273 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:42.974232112 +0000 UTC m=+185.302056096 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.474360 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:42 crc kubenswrapper[4706]: E1206 05:22:42.474907 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:42.974891349 +0000 UTC m=+185.302715323 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.575120 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:42 crc kubenswrapper[4706]: E1206 05:22:42.575470 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:43.075447452 +0000 UTC m=+185.403271436 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.644031 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 06 05:22:42 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld Dec 06 05:22:42 crc kubenswrapper[4706]: [+]process-running ok Dec 06 05:22:42 crc kubenswrapper[4706]: healthz check failed Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.644116 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.676909 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:42 crc kubenswrapper[4706]: E1206 05:22:42.677408 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:43.177387494 +0000 UTC m=+185.505211428 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.778080 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:42 crc kubenswrapper[4706]: E1206 05:22:42.778209 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:43.278182363 +0000 UTC m=+185.606006327 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.778252 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:42 crc kubenswrapper[4706]: E1206 05:22:42.778677 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:43.278666926 +0000 UTC m=+185.606490890 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.880072 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:42 crc kubenswrapper[4706]: E1206 05:22:42.880927 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:43.380873753 +0000 UTC m=+185.708697737 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:42 crc kubenswrapper[4706]: I1206 05:22:42.982728 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:42 crc kubenswrapper[4706]: E1206 05:22:42.983245 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:43.483222455 +0000 UTC m=+185.811046399 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.085296 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:43 crc kubenswrapper[4706]: E1206 05:22:43.085574 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:43.585542626 +0000 UTC m=+185.913366570 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.085673 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:43 crc kubenswrapper[4706]: E1206 05:22:43.086110 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:43.586099801 +0000 UTC m=+185.913923925 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.187560 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:43 crc kubenswrapper[4706]: E1206 05:22:43.187801 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:43.687768484 +0000 UTC m=+186.015592428 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.188311 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:43 crc kubenswrapper[4706]: E1206 05:22:43.188882 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:43.688856273 +0000 UTC m=+186.016680217 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.289683 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:43 crc kubenswrapper[4706]: E1206 05:22:43.289976 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:43.789933471 +0000 UTC m=+186.117757425 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.290159 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:43 crc kubenswrapper[4706]: E1206 05:22:43.290595 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:43.790584208 +0000 UTC m=+186.118408352 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.391012 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:43 crc kubenswrapper[4706]: E1206 05:22:43.391225 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:43.891191173 +0000 UTC m=+186.219015117 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.391392 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:43 crc kubenswrapper[4706]: E1206 05:22:43.391754 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:43.891738218 +0000 UTC m=+186.219562162 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.492673 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:43 crc kubenswrapper[4706]: E1206 05:22:43.492933 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:43.992897808 +0000 UTC m=+186.320721752 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.493570 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:43 crc kubenswrapper[4706]: E1206 05:22:43.494176 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:43.994165761 +0000 UTC m=+186.321989705 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.595547 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:43 crc kubenswrapper[4706]: E1206 05:22:43.595736 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:44.095703481 +0000 UTC m=+186.423527425 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.596389 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:43 crc kubenswrapper[4706]: E1206 05:22:43.596837 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:44.096815941 +0000 UTC m=+186.424639885 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.645500 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 06 05:22:43 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld
Dec 06 05:22:43 crc kubenswrapper[4706]: [+]process-running ok
Dec 06 05:22:43 crc kubenswrapper[4706]: healthz check failed
Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.645619 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.698177 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:43 crc kubenswrapper[4706]: E1206 05:22:43.698404 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:44.198362541 +0000 UTC m=+186.526186495 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.698618 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:43 crc kubenswrapper[4706]: E1206 05:22:43.699138 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:44.199120441 +0000 UTC m=+186.526944425 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.799349 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:43 crc kubenswrapper[4706]: E1206 05:22:43.799559 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:44.29952219 +0000 UTC m=+186.627346134 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.799920 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:43 crc kubenswrapper[4706]: E1206 05:22:43.800315 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:44.300301221 +0000 UTC m=+186.628125165 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.904462 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:43 crc kubenswrapper[4706]: E1206 05:22:43.904642 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:44.404617176 +0000 UTC m=+186.732441140 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:43 crc kubenswrapper[4706]: I1206 05:22:43.907356 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:43 crc kubenswrapper[4706]: E1206 05:22:43.907818 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:44.407801732 +0000 UTC m=+186.735625686 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.009165 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:44 crc kubenswrapper[4706]: E1206 05:22:44.010178 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:44.510147123 +0000 UTC m=+186.837971107 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.111738 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:44 crc kubenswrapper[4706]: E1206 05:22:44.112243 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:44.612220678 +0000 UTC m=+186.940044682 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.129024 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9hq8t" event={"ID":"fac9e7cf-4919-4a48-b314-f9b985397e7e","Type":"ContainerStarted","Data":"fbad4d63d909c3eabeb0e96bb2186b8179ad7955dcb5456472c41c48a1587f58"}
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.131028 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9sxtr" event={"ID":"57c94140-5c17-4423-82aa-e62f070fa68c","Type":"ContainerStarted","Data":"00e2dc55758eeb982b7534ed37b732dba07fff138d9b3cb5d47afb8bc91d7c12"}
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.132668 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq" event={"ID":"4edab72e-ed84-4e90-86da-02b3d3aa33bf","Type":"ContainerStarted","Data":"38ceca4a86197b7f69c99285530a6cb1a945d77c6bc68c0fc3612dd5180d152b"}
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.134699 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j5lbl" event={"ID":"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202","Type":"ContainerStarted","Data":"ab69b8f1d874f20f084f8e81f7b8328cd9912447560c5c0bde5dfd6b3d6435d4"}
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.213665 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:44 crc kubenswrapper[4706]: E1206 05:22:44.213913 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:44.71387924 +0000 UTC m=+187.041703184 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.214230 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:44 crc kubenswrapper[4706]: E1206 05:22:44.214670 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:44.714651601 +0000 UTC m=+187.042475555 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.315627 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:44 crc kubenswrapper[4706]: E1206 05:22:44.315984 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:44.815937885 +0000 UTC m=+187.143761869 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.417973 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:44 crc kubenswrapper[4706]: E1206 05:22:44.418524 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:44.918498151 +0000 UTC m=+187.246322125 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.519098 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:44 crc kubenswrapper[4706]: E1206 05:22:44.519592 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.019536398 +0000 UTC m=+187.347360392 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.519766 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:44 crc kubenswrapper[4706]: E1206 05:22:44.520690 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.020620157 +0000 UTC m=+187.348444141 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.620802 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:44 crc kubenswrapper[4706]: E1206 05:22:44.620999 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.120963724 +0000 UTC m=+187.448787668 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.621276 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:44 crc kubenswrapper[4706]: E1206 05:22:44.621670 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.121660364 +0000 UTC m=+187.449484388 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.644245 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 06 05:22:44 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld
Dec 06 05:22:44 crc kubenswrapper[4706]: [+]process-running ok
Dec 06 05:22:44 crc kubenswrapper[4706]: healthz check failed
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.644340 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.722305 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:44 crc kubenswrapper[4706]: E1206 05:22:44.722493 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.222461514 +0000 UTC m=+187.550285458 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.722610 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:44 crc kubenswrapper[4706]: E1206 05:22:44.722995 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.222988288 +0000 UTC m=+187.550812232 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.823719 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:44 crc kubenswrapper[4706]: E1206 05:22:44.823872 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.323849299 +0000 UTC m=+187.651673243 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.824148 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:44 crc kubenswrapper[4706]: E1206 05:22:44.824477 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.324469475 +0000 UTC m=+187.652293419 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.925892 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:44 crc kubenswrapper[4706]: E1206 05:22:44.926153 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.426113128 +0000 UTC m=+187.753937062 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:44 crc kubenswrapper[4706]: I1206 05:22:44.926424 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:44 crc kubenswrapper[4706]: E1206 05:22:44.926802 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.426784787 +0000 UTC m=+187.754608721 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.047835 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:45 crc kubenswrapper[4706]: E1206 05:22:45.048156 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.54812124 +0000 UTC m=+187.875945184 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.048301 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:45 crc kubenswrapper[4706]: E1206 05:22:45.048824 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.548802628 +0000 UTC m=+187.876626572 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.149639 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:45 crc kubenswrapper[4706]: E1206 05:22:45.149836 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.649795923 +0000 UTC m=+187.977620047 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.150238 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:45 crc kubenswrapper[4706]: E1206 05:22:45.150734 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.650725098 +0000 UTC m=+187.978549042 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.251673 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:45 crc kubenswrapper[4706]: E1206 05:22:45.251868 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.751832887 +0000 UTC m=+188.079656831 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.253239 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:45 crc kubenswrapper[4706]: E1206 05:22:45.253929 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.753894572 +0000 UTC m=+188.081718556 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.354239 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:45 crc kubenswrapper[4706]: E1206 05:22:45.354637 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.854582629 +0000 UTC m=+188.182406613 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.355026 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:45 crc kubenswrapper[4706]: E1206 05:22:45.355939 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.855920595 +0000 UTC m=+188.183744579 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.456089 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:45 crc kubenswrapper[4706]: E1206 05:22:45.456379 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.956330185 +0000 UTC m=+188.284154159 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.456475 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:45 crc kubenswrapper[4706]: E1206 05:22:45.457298 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:45.957094355 +0000 UTC m=+188.284918349 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:45 crc kubenswrapper[4706]: E1206 05:22:45.558081 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:46.058016488 +0000 UTC m=+188.385840472 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.557897 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.558331 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:45 crc kubenswrapper[4706]: E1206 05:22:45.558790 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:46.058775328 +0000 UTC m=+188.386599312 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.642996 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 06 05:22:45 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld
Dec 06 05:22:45 crc kubenswrapper[4706]: [+]process-running ok
Dec 06 05:22:45 crc kubenswrapper[4706]: healthz check failed
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.643150 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.659780 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:45 crc kubenswrapper[4706]: E1206 05:22:45.660263 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:46.160221135 +0000 UTC m=+188.488045119 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.761107 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:45 crc kubenswrapper[4706]: E1206 05:22:45.761670 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:46.261641473 +0000 UTC m=+188.589465457 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.862457 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:45 crc kubenswrapper[4706]: E1206 05:22:45.862775 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:46.36272182 +0000 UTC m=+188.690545804 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.863198 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:45 crc kubenswrapper[4706]: E1206 05:22:45.863762 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:46.363743307 +0000 UTC m=+188.691567291 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.964107 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:45 crc kubenswrapper[4706]: E1206 05:22:45.964618 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:46.464582918 +0000 UTC m=+188.792406862 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:45 crc kubenswrapper[4706]: I1206 05:22:45.966664 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:45 crc kubenswrapper[4706]: E1206 05:22:45.970296 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:46.470267052 +0000 UTC m=+188.798091026 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.069221 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:46 crc kubenswrapper[4706]: E1206 05:22:46.070871 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:46.570839825 +0000 UTC m=+188.898663769 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.147016 4706 generic.go:334] "Generic (PLEG): container finished" podID="404f2b83-1030-4b10-b1cf-c7db67aae01f" containerID="9832f7a7badc382a0d8db8ade0a6b05781c85b398accc1be9de35d344646a188" exitCode=0
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.147106 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wx94f" event={"ID":"404f2b83-1030-4b10-b1cf-c7db67aae01f","Type":"ContainerDied","Data":"9832f7a7badc382a0d8db8ade0a6b05781c85b398accc1be9de35d344646a188"}
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.149981 4706 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.151994 4706 generic.go:334] "Generic (PLEG): container finished" podID="39728d8c-03c4-42d3-999d-1dfe014cfb34" containerID="72a35fd1caa4dd68f2228a6c426ff2fc121cea0a9cf2c8382b55c998a241e913" exitCode=0
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.152078 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s" event={"ID":"39728d8c-03c4-42d3-999d-1dfe014cfb34","Type":"ContainerDied","Data":"72a35fd1caa4dd68f2228a6c426ff2fc121cea0a9cf2c8382b55c998a241e913"}
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.156824 4706 generic.go:334] "Generic (PLEG): container finished" podID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" containerID="ab69b8f1d874f20f084f8e81f7b8328cd9912447560c5c0bde5dfd6b3d6435d4" exitCode=0
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.156868 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j5lbl" event={"ID":"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202","Type":"ContainerDied","Data":"ab69b8f1d874f20f084f8e81f7b8328cd9912447560c5c0bde5dfd6b3d6435d4"}
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.160401 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" event={"ID":"3a68cbce-a0d0-4128-b5fc-ba2664947314","Type":"ContainerStarted","Data":"cc319150e352dc43b557615662305e73906eab1b0d245572ea6bf1cee124481b"}
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.162506 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwthx" event={"ID":"cf5c1feb-f09b-41c2-9974-56538ccc281f","Type":"ContainerStarted","Data":"61257864046feff646caad462392480e6d5a416200b3db0009bd6a207e692d68"}
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.167630 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-25hf6" event={"ID":"cba73644-0f32-4d53-9c68-e98d52909f9a","Type":"ContainerStarted","Data":"fd7d055d44d341148c61f0804746b85a13b25b0a881441f2a4ade04f8f5be7d5"}
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.174486 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.175057 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k" event={"ID":"6f29acc2-2357-4418-9680-e743ccba8702","Type":"ContainerStarted","Data":"0e60184ead536a4d65379ff51d336d74f6c8a96d714f452f0aa4d2acdd719e69"}
Dec 06 05:22:46 crc kubenswrapper[4706]: E1206 05:22:46.175435 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:46.675415417 +0000 UTC m=+189.003239361 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.179340 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vfwnb" event={"ID":"a2b02aaa-3dd3-462e-9dd6-c69748bc8511","Type":"ContainerStarted","Data":"3c8c37bf5bc4a83606b3938f044e605e409dc6590652cac9e96218da69c8ca91"}
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.180952 4706 generic.go:334] "Generic (PLEG): container finished" podID="514a779d-1633-49f5-a991-5a80d8714c19" containerID="1db8644cab93daaae7251ee05a3472b05944a76e27862c608542d0e759043e06" exitCode=0
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.181239 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-svf4w" event={"ID":"514a779d-1633-49f5-a991-5a80d8714c19","Type":"ContainerDied","Data":"1db8644cab93daaae7251ee05a3472b05944a76e27862c608542d0e759043e06"}
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.183461 4706 generic.go:334] "Generic (PLEG): container finished" podID="602cc6c6-054f-46bd-a044-0ed68baa41ad" containerID="37984351dd838e60198b441283b37c70fad224f6193186b6ff143b7a9ac34f5a" exitCode=0
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.183664 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"602cc6c6-054f-46bd-a044-0ed68baa41ad","Type":"ContainerDied","Data":"37984351dd838e60198b441283b37c70fad224f6193186b6ff143b7a9ac34f5a"}
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.186631 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"7d3b0e3c-72f0-41d4-af60-a2f50ea3a040","Type":"ContainerStarted","Data":"266f5781f0927bb2f339d2dd371dee4b4a63dc6c401f944e4132755d396db3a2"}
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.192724 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" event={"ID":"f05088c1-1548-4c56-8e14-3610540dec5c","Type":"ContainerStarted","Data":"24f346741ccca2ee48e43f3c7b29236645177adac31663c3fa3a1801c4e714b5"}
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.196180 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4lmqq" event={"ID":"bd187a9c-688a-463f-a84a-6fb7c1df0360","Type":"ContainerStarted","Data":"afb33012be8882149a98c19fa0f91ca30403f98c9824068b761cae7f26cc0c2e"}
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.197915 4706 generic.go:334] "Generic (PLEG): container finished" podID="fac9e7cf-4919-4a48-b314-f9b985397e7e" containerID="fbad4d63d909c3eabeb0e96bb2186b8179ad7955dcb5456472c41c48a1587f58" exitCode=0
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.197999 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9hq8t" event={"ID":"fac9e7cf-4919-4a48-b314-f9b985397e7e","Type":"ContainerDied","Data":"fbad4d63d909c3eabeb0e96bb2186b8179ad7955dcb5456472c41c48a1587f58"}
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.200961 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-jk2pn" event={"ID":"971d28d8-7a3b-4af0-a3e3-9ee9468dbca5","Type":"ContainerStarted","Data":"b9f229b67cd1e7c7c219e4b073d2080416e392ae808292cb676406ac30aa295a"}
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.208366 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-qwdgg" event={"ID":"28ae28d5-433c-4ce7-bb6e-2532d65b354d","Type":"ContainerStarted","Data":"871cf69c1a306dbca0c739956d36343e801979cb39a373174423c983c8ed8619"}
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.210504 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j4frb" event={"ID":"2d21abc6-d736-47df-8eac-4dee0691a92c","Type":"ContainerStarted","Data":"88b67b10ab6d97c84ebeab02b174ded59ef788e294e6260611fed0c922a202be"}
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.212107 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-5n589"
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.219318 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-5n589"
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.224762 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-vfwnb" podStartSLOduration=160.224740998 podStartE2EDuration="2m40.224740998s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:46.2181289 +0000 UTC m=+188.545952864" watchObservedRunningTime="2025-12-06 05:22:46.224740998 +0000 UTC m=+188.552564942"
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.260125 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" podStartSLOduration=160.260100682 podStartE2EDuration="2m40.260100682s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:46.237690308 +0000 UTC m=+188.565514252" watchObservedRunningTime="2025-12-06 05:22:46.260100682 +0000 UTC m=+188.587924636"
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.275190 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:46 crc kubenswrapper[4706]: E1206 05:22:46.275324 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:46.775302762 +0000 UTC m=+189.103126706 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.275744 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:46 crc kubenswrapper[4706]: E1206 05:22:46.277599 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:46.777570153 +0000 UTC m=+189.105394097 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.369902 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-65j9k" podStartSLOduration=160.369781962 podStartE2EDuration="2m40.369781962s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:46.336715179 +0000 UTC m=+188.664539123" watchObservedRunningTime="2025-12-06 05:22:46.369781962 +0000 UTC m=+188.697605906" Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.376950 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:46 crc kubenswrapper[4706]: E1206 05:22:46.377164 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:46.877134989 +0000 UTC m=+189.204958933 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.377325 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:46 crc kubenswrapper[4706]: E1206 05:22:46.377756 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:46.877748157 +0000 UTC m=+189.205572101 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.445668 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-5n589" podStartSLOduration=160.445649049 podStartE2EDuration="2m40.445649049s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:46.421403584 +0000 UTC m=+188.749227538" watchObservedRunningTime="2025-12-06 05:22:46.445649049 +0000 UTC m=+188.773472993" Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.447628 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-wr2bq" podStartSLOduration=160.447618811 podStartE2EDuration="2m40.447618811s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:46.444987261 +0000 UTC m=+188.772811205" watchObservedRunningTime="2025-12-06 05:22:46.447618811 +0000 UTC m=+188.775442755" Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.465429 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-jhgqt" podStartSLOduration=160.465410822 podStartE2EDuration="2m40.465410822s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:46.464027514 +0000 UTC m=+188.791851458" watchObservedRunningTime="2025-12-06 05:22:46.465410822 +0000 UTC m=+188.793234766" Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.478984 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:46 crc kubenswrapper[4706]: E1206 05:22:46.479445 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:46.97942703 +0000 UTC m=+189.307250974 (durationBeforeRetry 500ms). 
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.495516 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-9sxtr" podStartSLOduration=162.495491544 podStartE2EDuration="2m42.495491544s" podCreationTimestamp="2025-12-06 05:20:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:46.487275382 +0000 UTC m=+188.815099326" watchObservedRunningTime="2025-12-06 05:22:46.495491544 +0000 UTC m=+188.823315508"
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.581639 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:46 crc kubenswrapper[4706]: E1206 05:22:46.582021 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:47.082003107 +0000 UTC m=+189.409827051 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.645165 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 06 05:22:46 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld
Dec 06 05:22:46 crc kubenswrapper[4706]: [+]process-running ok
Dec 06 05:22:46 crc kubenswrapper[4706]: healthz check failed
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.645251 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.683766 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:46 crc kubenswrapper[4706]: E1206 05:22:46.683960 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:47.183930238 +0000 UTC m=+189.511754202 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.684249 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:46 crc kubenswrapper[4706]: E1206 05:22:46.684717 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:47.184701648 +0000 UTC m=+189.512525612 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.786378 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:46 crc kubenswrapper[4706]: E1206 05:22:46.786711 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:47.286664699 +0000 UTC m=+189.614488673 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.787030 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:46 crc kubenswrapper[4706]: E1206 05:22:46.787822 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:47.28780105 +0000 UTC m=+189.615625034 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.887653 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:46 crc kubenswrapper[4706]: E1206 05:22:46.887875 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:47.38784075 +0000 UTC m=+189.715664724 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.888217 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:46 crc kubenswrapper[4706]: E1206 05:22:46.888748 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:47.388731774 +0000 UTC m=+189.716555758 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.989817 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:46 crc kubenswrapper[4706]: E1206 05:22:46.990291 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:47.490223762 +0000 UTC m=+189.818047736 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:46 crc kubenswrapper[4706]: I1206 05:22:46.990736 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:46 crc kubenswrapper[4706]: E1206 05:22:46.991297 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:47.491273991 +0000 UTC m=+189.819097935 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.092793 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:47 crc kubenswrapper[4706]: E1206 05:22:47.093001 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:47.592959684 +0000 UTC m=+189.920783638 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.093419 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:47 crc kubenswrapper[4706]: E1206 05:22:47.093891 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:47.593878349 +0000 UTC m=+189.921702303 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.195548 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:47 crc kubenswrapper[4706]: E1206 05:22:47.195802 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:47.695767908 +0000 UTC m=+190.023591852 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.196189 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:47 crc kubenswrapper[4706]: E1206 05:22:47.196549 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:47.696538819 +0000 UTC m=+190.024362763 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.222781 4706 generic.go:334] "Generic (PLEG): container finished" podID="2d21abc6-d736-47df-8eac-4dee0691a92c" containerID="88b67b10ab6d97c84ebeab02b174ded59ef788e294e6260611fed0c922a202be" exitCode=0
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.222870 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j4frb" event={"ID":"2d21abc6-d736-47df-8eac-4dee0691a92c","Type":"ContainerDied","Data":"88b67b10ab6d97c84ebeab02b174ded59ef788e294e6260611fed0c922a202be"}
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.224813 4706 generic.go:334] "Generic (PLEG): container finished" podID="cf5c1feb-f09b-41c2-9974-56538ccc281f" containerID="61257864046feff646caad462392480e6d5a416200b3db0009bd6a207e692d68" exitCode=0
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.224914 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwthx" event={"ID":"cf5c1feb-f09b-41c2-9974-56538ccc281f","Type":"ContainerDied","Data":"61257864046feff646caad462392480e6d5a416200b3db0009bd6a207e692d68"}
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.226537 4706 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.228171 4706 generic.go:334] "Generic (PLEG): container finished" podID="52e328e7-19c9-4412-96f0-582cd5add7c5" containerID="9edf981cdcd123f3434bfef0dcf6280fd6948a0ca19ee573a57a44d4c5d73df7" exitCode=0
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.228347 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2clj" event={"ID":"52e328e7-19c9-4412-96f0-582cd5add7c5","Type":"ContainerDied","Data":"9edf981cdcd123f3434bfef0dcf6280fd6948a0ca19ee573a57a44d4c5d73df7"}
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.276929 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4lmqq" podStartSLOduration=161.276904297 podStartE2EDuration="2m41.276904297s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:47.273571497 +0000 UTC m=+189.601395451" watchObservedRunningTime="2025-12-06 05:22:47.276904297 +0000 UTC m=+189.604728241"
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.298211 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:47 crc kubenswrapper[4706]: E1206 05:22:47.300683 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:47.800661208 +0000 UTC m=+190.128485152 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.301307 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=6.301282115 podStartE2EDuration="6.301282115s" podCreationTimestamp="2025-12-06 05:22:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:47.300088573 +0000 UTC m=+189.627912537" watchObservedRunningTime="2025-12-06 05:22:47.301282115 +0000 UTC m=+189.629106059"
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.328068 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-25hf6" podStartSLOduration=161.328027807 podStartE2EDuration="2m41.328027807s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:47.323637368 +0000 UTC m=+189.651461322" watchObservedRunningTime="2025-12-06 05:22:47.328027807 +0000 UTC m=+189.655851751"
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.401449 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:47 crc kubenswrapper[4706]: E1206 05:22:47.403032 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:47.903018109 +0000 UTC m=+190.230842053 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.409810 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-jk2pn" podStartSLOduration=23.409775182 podStartE2EDuration="23.409775182s" podCreationTimestamp="2025-12-06 05:22:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:47.405001243 +0000 UTC m=+189.732825207" watchObservedRunningTime="2025-12-06 05:22:47.409775182 +0000 UTC m=+189.737599126"
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.410822 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-qwdgg" podStartSLOduration=161.41081449 podStartE2EDuration="2m41.41081449s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:47.369462445 +0000 UTC m=+189.697286389" watchObservedRunningTime="2025-12-06 05:22:47.41081449 +0000 UTC m=+189.738638434"
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.503766 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Dec 06 05:22:47 crc kubenswrapper[4706]: E1206 05:22:47.504205 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-12-06 05:22:48.00418517 +0000 UTC m=+190.332009114 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.555443 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph"
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.607209 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8"
Dec 06 05:22:47 crc kubenswrapper[4706]: E1206 05:22:47.609300 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-12-06 05:22:48.109283835 +0000 UTC m=+190.437107779 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-njll8" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.647120 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Dec 06 05:22:47 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld
Dec 06 05:22:47 crc kubenswrapper[4706]: [+]process-running ok
Dec 06 05:22:47 crc kubenswrapper[4706]: healthz check failed
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.647229 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.668945 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.670796 4706 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-12-06T05:22:47.22657222Z","Handler":null,"Name":""}
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.678004 4706 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.678083 4706 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.681097 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s"
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.689166 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt"
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.689993 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt"
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.708011 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/602cc6c6-054f-46bd-a044-0ed68baa41ad-kubelet-dir\") pod \"602cc6c6-054f-46bd-a044-0ed68baa41ad\" (UID: \"602cc6c6-054f-46bd-a044-0ed68baa41ad\") "
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.708175 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/602cc6c6-054f-46bd-a044-0ed68baa41ad-kube-api-access\") pod \"602cc6c6-054f-46bd-a044-0ed68baa41ad\" (UID: \"602cc6c6-054f-46bd-a044-0ed68baa41ad\") "
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.708237 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/39728d8c-03c4-42d3-999d-1dfe014cfb34-secret-volume\") pod \"39728d8c-03c4-42d3-999d-1dfe014cfb34\" (UID: \"39728d8c-03c4-42d3-999d-1dfe014cfb34\") "
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.708319 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zt7tm\" (UniqueName: \"kubernetes.io/projected/39728d8c-03c4-42d3-999d-1dfe014cfb34-kube-api-access-zt7tm\") pod \"39728d8c-03c4-42d3-999d-1dfe014cfb34\" (UID: \"39728d8c-03c4-42d3-999d-1dfe014cfb34\") "
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.708415 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/39728d8c-03c4-42d3-999d-1dfe014cfb34-config-volume\") pod \"39728d8c-03c4-42d3-999d-1dfe014cfb34\" (UID: \"39728d8c-03c4-42d3-999d-1dfe014cfb34\") "
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.708598 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.709438 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/602cc6c6-054f-46bd-a044-0ed68baa41ad-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "602cc6c6-054f-46bd-a044-0ed68baa41ad" (UID: "602cc6c6-054f-46bd-a044-0ed68baa41ad"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.711805 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/39728d8c-03c4-42d3-999d-1dfe014cfb34-config-volume" (OuterVolumeSpecName: "config-volume") pod "39728d8c-03c4-42d3-999d-1dfe014cfb34" (UID: "39728d8c-03c4-42d3-999d-1dfe014cfb34"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.717520 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.723469 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39728d8c-03c4-42d3-999d-1dfe014cfb34-kube-api-access-zt7tm" (OuterVolumeSpecName: "kube-api-access-zt7tm") pod "39728d8c-03c4-42d3-999d-1dfe014cfb34" (UID: "39728d8c-03c4-42d3-999d-1dfe014cfb34"). InnerVolumeSpecName "kube-api-access-zt7tm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.724111 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/602cc6c6-054f-46bd-a044-0ed68baa41ad-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "602cc6c6-054f-46bd-a044-0ed68baa41ad" (UID: "602cc6c6-054f-46bd-a044-0ed68baa41ad"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.724800 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/39728d8c-03c4-42d3-999d-1dfe014cfb34-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "39728d8c-03c4-42d3-999d-1dfe014cfb34" (UID: "39728d8c-03c4-42d3-999d-1dfe014cfb34"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.759669 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.776794 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.810913 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.811262 4706 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/602cc6c6-054f-46bd-a044-0ed68baa41ad-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.811292 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/602cc6c6-054f-46bd-a044-0ed68baa41ad-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.811305 4706 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/39728d8c-03c4-42d3-999d-1dfe014cfb34-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.811318 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zt7tm\" (UniqueName: \"kubernetes.io/projected/39728d8c-03c4-42d3-999d-1dfe014cfb34-kube-api-access-zt7tm\") on node \"crc\" DevicePath \"\"" Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.811331 4706 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/39728d8c-03c4-42d3-999d-1dfe014cfb34-config-volume\") on node \"crc\" DevicePath \"\"" Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.855068 4706 patch_prober.go:28] interesting pod/console-f9d7485db-t4xd8 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.855128 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-t4xd8" podUID="ed24741b-5476-4f20-bd17-4c8686d40419" containerName="console" probeResult="failure" output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.937267 4706 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Dec 06 05:22:47 crc kubenswrapper[4706]: I1206 05:22:47.937799 4706 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.012237 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4lmqq" Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.012766 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-njll8\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.043515 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.044385 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59" Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.044773 4706 patch_prober.go:28] interesting pod/downloads-7954f5f757-6xms4 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.39:8080/\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body= Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.044819 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-6xms4" podUID="190e4233-a97e-4af7-8e7e-d66ccf827546" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.39:8080/\": dial tcp 10.217.0.39:8080: connect: connection refused" Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.044944 4706 patch_prober.go:28] interesting pod/downloads-7954f5f757-6xms4 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.39:8080/\": dial tcp 10.217.0.39:8080: connect: connection refused" start-of-body= Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.045039 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-6xms4" podUID="190e4233-a97e-4af7-8e7e-d66ccf827546" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.39:8080/\": dial tcp 10.217.0.39:8080: connect: connection refused" Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.235289 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.235276 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"602cc6c6-054f-46bd-a044-0ed68baa41ad","Type":"ContainerDied","Data":"f25182cec79d28bf54a1865342f9dfed95b0862c121fe9256a2b7c1289b2ebd3"} Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.235441 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f25182cec79d28bf54a1865342f9dfed95b0862c121fe9256a2b7c1289b2ebd3" Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.237558 4706 generic.go:334] "Generic (PLEG): container finished" podID="7d3b0e3c-72f0-41d4-af60-a2f50ea3a040" containerID="266f5781f0927bb2f339d2dd371dee4b4a63dc6c401f944e4132755d396db3a2" exitCode=0 Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.237636 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"7d3b0e3c-72f0-41d4-af60-a2f50ea3a040","Type":"ContainerDied","Data":"266f5781f0927bb2f339d2dd371dee4b4a63dc6c401f944e4132755d396db3a2"} Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.239715 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s" event={"ID":"39728d8c-03c4-42d3-999d-1dfe014cfb34","Type":"ContainerDied","Data":"a30834a18e94286871139c36b5c098b472b3ee08655e6684f3943f04c12aa0ee"} Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.239749 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a30834a18e94286871139c36b5c098b472b3ee08655e6684f3943f04c12aa0ee" Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.239785 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s" Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.242819 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-4ltjs" event={"ID":"f4065785-c72e-4c45-ab51-ce292be4f2ed","Type":"ContainerStarted","Data":"51c7f4a384fdfcb2c27323c650254b6b489c110591966b036b9c6855cefdf9c0"} Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.243206 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.244597 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-db62k" event={"ID":"33133042-30b9-487e-8ee4-097e0faf7673","Type":"ContainerStarted","Data":"5f5e35b1efe360e49a64c3e7370e8cde1fc5ae158bb45de7f776dd5a56487de3"} Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.248319 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-k8g95" event={"ID":"445fbc3d-3a2f-4361-8444-badce4d8e564","Type":"ContainerStarted","Data":"9b783abe8cabe48d046595234979ee5cdbad4fa8894032ec44e9010cfad6dcbd"} Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.248765 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.250802 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" event={"ID":"3a68cbce-a0d0-4128-b5fc-ba2664947314","Type":"ContainerStarted","Data":"8c74de22fb0ef72721a0438f59dcca0a7acfecaf15c84cf52f053b65680885f6"} Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.306377 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-4ltjs" podStartSLOduration=162.306357314 podStartE2EDuration="2m42.306357314s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:48.303107236 +0000 UTC m=+190.630931190" watchObservedRunningTime="2025-12-06 05:22:48.306357314 +0000 UTC m=+190.634181258" Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.307266 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.364901 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-k8g95" podStartSLOduration=163.364874033 podStartE2EDuration="2m43.364874033s" podCreationTimestamp="2025-12-06 05:20:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:48.332605242 +0000 UTC m=+190.660429186" watchObservedRunningTime="2025-12-06 05:22:48.364874033 +0000 UTC m=+190.692697977" Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.561206 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-njll8"] Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.644667 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 06 05:22:48 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld Dec 06 05:22:48 crc kubenswrapper[4706]: [+]process-running ok Dec 06 05:22:48 crc kubenswrapper[4706]: healthz check failed Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.644763 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 06 05:22:48 crc kubenswrapper[4706]: I1206 05:22:48.860293 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-mkj59" Dec 06 05:22:49 crc kubenswrapper[4706]: I1206 05:22:49.059827 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-jk2pn" Dec 06 05:22:49 crc kubenswrapper[4706]: I1206 05:22:49.062345 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-jk2pn" Dec 06 05:22:49 crc kubenswrapper[4706]: I1206 05:22:49.257245 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-njll8" 
event={"ID":"cbcef7ec-a2f0-4363-93e6-772d6d35d571","Type":"ContainerStarted","Data":"e7daac2ac8be4ddc494adf6491ed17519d2d27a6062df2ded8fce5295e589ac5"} Dec 06 05:22:49 crc kubenswrapper[4706]: I1206 05:22:49.495298 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:49 crc kubenswrapper[4706]: I1206 05:22:49.502215 4706 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-h55gt container/oauth-apiserver namespace/openshift-oauth-apiserver: Readiness probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Dec 06 05:22:49 crc kubenswrapper[4706]: [+]log ok Dec 06 05:22:49 crc kubenswrapper[4706]: [+]etcd ok Dec 06 05:22:49 crc kubenswrapper[4706]: [+]etcd-readiness ok Dec 06 05:22:49 crc kubenswrapper[4706]: [+]poststarthook/start-apiserver-admission-initializer ok Dec 06 05:22:49 crc kubenswrapper[4706]: [-]informer-sync failed: reason withheld Dec 06 05:22:49 crc kubenswrapper[4706]: [+]poststarthook/generic-apiserver-start-informers ok Dec 06 05:22:49 crc kubenswrapper[4706]: [+]poststarthook/max-in-flight-filter ok Dec 06 05:22:49 crc kubenswrapper[4706]: [+]poststarthook/storage-object-count-tracker-hook ok Dec 06 05:22:49 crc kubenswrapper[4706]: [+]poststarthook/openshift.io-StartUserInformer ok Dec 06 05:22:49 crc kubenswrapper[4706]: [+]poststarthook/openshift.io-StartOAuthInformer ok Dec 06 05:22:49 crc kubenswrapper[4706]: [+]poststarthook/openshift.io-StartTokenTimeoutUpdater ok Dec 06 05:22:49 crc kubenswrapper[4706]: [+]shutdown ok Dec 06 05:22:49 crc kubenswrapper[4706]: readyz check failed Dec 06 05:22:49 crc kubenswrapper[4706]: I1206 05:22:49.502314 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" podUID="f05088c1-1548-4c56-8e14-3610540dec5c" containerName="oauth-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 06 05:22:49 crc kubenswrapper[4706]: I1206 05:22:49.549717 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 06 05:22:49 crc kubenswrapper[4706]: I1206 05:22:49.644826 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 06 05:22:49 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld Dec 06 05:22:49 crc kubenswrapper[4706]: [+]process-running ok Dec 06 05:22:49 crc kubenswrapper[4706]: healthz check failed Dec 06 05:22:49 crc kubenswrapper[4706]: I1206 05:22:49.644889 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 06 05:22:49 crc kubenswrapper[4706]: I1206 05:22:49.646771 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7d3b0e3c-72f0-41d4-af60-a2f50ea3a040-kube-api-access\") pod \"7d3b0e3c-72f0-41d4-af60-a2f50ea3a040\" (UID: \"7d3b0e3c-72f0-41d4-af60-a2f50ea3a040\") " Dec 06 05:22:49 crc kubenswrapper[4706]: I1206 05:22:49.646834 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7d3b0e3c-72f0-41d4-af60-a2f50ea3a040-kubelet-dir\") pod \"7d3b0e3c-72f0-41d4-af60-a2f50ea3a040\" (UID: \"7d3b0e3c-72f0-41d4-af60-a2f50ea3a040\") " Dec 06 05:22:49 crc kubenswrapper[4706]: I1206 05:22:49.647106 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7d3b0e3c-72f0-41d4-af60-a2f50ea3a040-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "7d3b0e3c-72f0-41d4-af60-a2f50ea3a040" (UID: "7d3b0e3c-72f0-41d4-af60-a2f50ea3a040"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:22:49 crc kubenswrapper[4706]: I1206 05:22:49.647282 4706 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/7d3b0e3c-72f0-41d4-af60-a2f50ea3a040-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 06 05:22:49 crc kubenswrapper[4706]: I1206 05:22:49.669336 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d3b0e3c-72f0-41d4-af60-a2f50ea3a040-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "7d3b0e3c-72f0-41d4-af60-a2f50ea3a040" (UID: "7d3b0e3c-72f0-41d4-af60-a2f50ea3a040"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:22:49 crc kubenswrapper[4706]: I1206 05:22:49.749019 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/7d3b0e3c-72f0-41d4-af60-a2f50ea3a040-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 06 05:22:50 crc kubenswrapper[4706]: I1206 05:22:50.266477 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"7d3b0e3c-72f0-41d4-af60-a2f50ea3a040","Type":"ContainerDied","Data":"d08ee2cb63c034c728b75f7876c07a330e30a5fd39ee9562e01991afe1e0676a"} Dec 06 05:22:50 crc kubenswrapper[4706]: I1206 05:22:50.267022 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d08ee2cb63c034c728b75f7876c07a330e30a5fd39ee9562e01991afe1e0676a" Dec 06 05:22:50 crc kubenswrapper[4706]: I1206 05:22:50.266540 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Dec 06 05:22:50 crc kubenswrapper[4706]: I1206 05:22:50.289770 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-db62k" podStartSLOduration=165.289752949 podStartE2EDuration="2m45.289752949s" podCreationTimestamp="2025-12-06 05:20:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:50.289733279 +0000 UTC m=+192.617557263" watchObservedRunningTime="2025-12-06 05:22:50.289752949 +0000 UTC m=+192.617576893" Dec 06 05:22:50 crc kubenswrapper[4706]: I1206 05:22:50.642701 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 06 05:22:50 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld Dec 06 05:22:50 crc kubenswrapper[4706]: [+]process-running ok Dec 06 05:22:50 crc kubenswrapper[4706]: healthz check failed Dec 06 05:22:50 crc kubenswrapper[4706]: I1206 05:22:50.642772 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 06 05:22:51 crc kubenswrapper[4706]: I1206 05:22:51.460379 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:51 crc kubenswrapper[4706]: I1206 05:22:51.460464 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:51 crc kubenswrapper[4706]: I1206 05:22:51.468187 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:51 crc kubenswrapper[4706]: I1206 05:22:51.644736 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 06 05:22:51 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld Dec 06 05:22:51 crc kubenswrapper[4706]: [+]process-running ok Dec 06 05:22:51 crc 
kubenswrapper[4706]: healthz check failed Dec 06 05:22:51 crc kubenswrapper[4706]: I1206 05:22:51.644847 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 06 05:22:52 crc kubenswrapper[4706]: I1206 05:22:52.310545 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-k8g95" Dec 06 05:22:52 crc kubenswrapper[4706]: I1206 05:22:52.642736 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 06 05:22:52 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld Dec 06 05:22:52 crc kubenswrapper[4706]: [+]process-running ok Dec 06 05:22:52 crc kubenswrapper[4706]: healthz check failed Dec 06 05:22:52 crc kubenswrapper[4706]: I1206 05:22:52.642808 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 06 05:22:52 crc kubenswrapper[4706]: I1206 05:22:52.690298 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-h55gt" Dec 06 05:22:53 crc kubenswrapper[4706]: I1206 05:22:53.643622 4706 patch_prober.go:28] interesting pod/router-default-5444994796-l9d42 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Dec 06 05:22:53 crc kubenswrapper[4706]: [-]has-synced failed: reason withheld Dec 06 05:22:53 crc kubenswrapper[4706]: [+]process-running ok Dec 06 05:22:53 crc kubenswrapper[4706]: healthz check failed Dec 06 05:22:53 crc kubenswrapper[4706]: I1206 05:22:53.643701 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-l9d42" podUID="de9628dc-df47-4a48-898b-f85d33e59452" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Dec 06 05:22:54 crc kubenswrapper[4706]: I1206 05:22:54.546404 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" event={"ID":"3a68cbce-a0d0-4128-b5fc-ba2664947314","Type":"ContainerStarted","Data":"48ef9ac4eb3b6eb6a95c2aaa31fc4b726d9ad3bec091f940832eae46e6a26dae"} Dec 06 05:22:54 crc kubenswrapper[4706]: I1206 05:22:54.552510 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-njll8" event={"ID":"cbcef7ec-a2f0-4363-93e6-772d6d35d571","Type":"ContainerStarted","Data":"0814d20245d5d4b36fdc6b8cc07241a34c47a4cd54da2bdc70e8d058dd9d5ea0"} Dec 06 05:22:54 crc kubenswrapper[4706]: I1206 05:22:54.783662 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:54 crc kubenswrapper[4706]: I1206 05:22:54.786956 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-l9d42" Dec 06 05:22:57 crc kubenswrapper[4706]: I1206 05:22:57.583817 4706 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:22:57 crc kubenswrapper[4706]: I1206 05:22:57.608891 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-4gjwk" podStartSLOduration=33.608864283 podStartE2EDuration="33.608864283s" podCreationTimestamp="2025-12-06 05:22:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:57.60579347 +0000 UTC m=+199.933617414" watchObservedRunningTime="2025-12-06 05:22:57.608864283 +0000 UTC m=+199.936688237" Dec 06 05:22:57 crc kubenswrapper[4706]: I1206 05:22:57.628888 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-njll8" podStartSLOduration=171.628869226 podStartE2EDuration="2m51.628869226s" podCreationTimestamp="2025-12-06 05:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:22:57.623788258 +0000 UTC m=+199.951612212" watchObservedRunningTime="2025-12-06 05:22:57.628869226 +0000 UTC m=+199.956693170" Dec 06 05:22:57 crc kubenswrapper[4706]: I1206 05:22:57.868811 4706 patch_prober.go:28] interesting pod/console-f9d7485db-t4xd8 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" start-of-body= Dec 06 05:22:57 crc kubenswrapper[4706]: I1206 05:22:57.868912 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-t4xd8" podUID="ed24741b-5476-4f20-bd17-4c8686d40419" containerName="console" probeResult="failure" output="Get \"https://10.217.0.14:8443/health\": dial tcp 10.217.0.14:8443: connect: connection refused" Dec 06 05:22:58 crc kubenswrapper[4706]: I1206 05:22:58.068826 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-6xms4" Dec 06 05:23:05 crc kubenswrapper[4706]: I1206 05:23:05.961313 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:23:05 crc kubenswrapper[4706]: I1206 05:23:05.961892 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:23:07 crc kubenswrapper[4706]: I1206 05:23:07.882151 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:23:07 crc kubenswrapper[4706]: I1206 05:23:07.887370 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:23:17 crc kubenswrapper[4706]: I1206 05:23:17.829159 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 06 05:23:17 crc kubenswrapper[4706]: E1206 05:23:17.830986 4706 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="602cc6c6-054f-46bd-a044-0ed68baa41ad" containerName="pruner" Dec 06 05:23:17 crc kubenswrapper[4706]: I1206 05:23:17.831169 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="602cc6c6-054f-46bd-a044-0ed68baa41ad" containerName="pruner" Dec 06 05:23:17 crc kubenswrapper[4706]: E1206 05:23:17.831260 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d3b0e3c-72f0-41d4-af60-a2f50ea3a040" containerName="pruner" Dec 06 05:23:17 crc kubenswrapper[4706]: I1206 05:23:17.831342 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d3b0e3c-72f0-41d4-af60-a2f50ea3a040" containerName="pruner" Dec 06 05:23:17 crc kubenswrapper[4706]: E1206 05:23:17.831438 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39728d8c-03c4-42d3-999d-1dfe014cfb34" containerName="collect-profiles" Dec 06 05:23:17 crc kubenswrapper[4706]: I1206 05:23:17.831525 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="39728d8c-03c4-42d3-999d-1dfe014cfb34" containerName="collect-profiles" Dec 06 05:23:17 crc kubenswrapper[4706]: I1206 05:23:17.831663 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="602cc6c6-054f-46bd-a044-0ed68baa41ad" containerName="pruner" Dec 06 05:23:17 crc kubenswrapper[4706]: I1206 05:23:17.831677 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d3b0e3c-72f0-41d4-af60-a2f50ea3a040" containerName="pruner" Dec 06 05:23:17 crc kubenswrapper[4706]: I1206 05:23:17.831692 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="39728d8c-03c4-42d3-999d-1dfe014cfb34" containerName="collect-profiles" Dec 06 05:23:17 crc kubenswrapper[4706]: I1206 05:23:17.832354 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 06 05:23:17 crc kubenswrapper[4706]: I1206 05:23:17.834188 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Dec 06 05:23:17 crc kubenswrapper[4706]: I1206 05:23:17.834309 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Dec 06 05:23:17 crc kubenswrapper[4706]: I1206 05:23:17.843325 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Dec 06 05:23:18 crc kubenswrapper[4706]: I1206 05:23:18.005628 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a84582fb-ebc7-4e13-9d93-9db3cfa2edee-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a84582fb-ebc7-4e13-9d93-9db3cfa2edee\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 06 05:23:18 crc kubenswrapper[4706]: I1206 05:23:18.005680 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a84582fb-ebc7-4e13-9d93-9db3cfa2edee-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a84582fb-ebc7-4e13-9d93-9db3cfa2edee\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 06 05:23:18 crc kubenswrapper[4706]: I1206 05:23:18.016731 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-4lmqq" Dec 06 05:23:18 crc kubenswrapper[4706]: I1206 05:23:18.106337 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" 
(UniqueName: \"kubernetes.io/projected/a84582fb-ebc7-4e13-9d93-9db3cfa2edee-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a84582fb-ebc7-4e13-9d93-9db3cfa2edee\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 06 05:23:18 crc kubenswrapper[4706]: I1206 05:23:18.106382 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a84582fb-ebc7-4e13-9d93-9db3cfa2edee-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a84582fb-ebc7-4e13-9d93-9db3cfa2edee\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 06 05:23:18 crc kubenswrapper[4706]: I1206 05:23:18.106438 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a84582fb-ebc7-4e13-9d93-9db3cfa2edee-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a84582fb-ebc7-4e13-9d93-9db3cfa2edee\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 06 05:23:18 crc kubenswrapper[4706]: I1206 05:23:18.130029 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a84582fb-ebc7-4e13-9d93-9db3cfa2edee-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a84582fb-ebc7-4e13-9d93-9db3cfa2edee\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 06 05:23:18 crc kubenswrapper[4706]: I1206 05:23:18.153024 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 06 05:23:18 crc kubenswrapper[4706]: I1206 05:23:18.254544 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:23:22 crc kubenswrapper[4706]: I1206 05:23:22.048261 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Dec 06 05:23:22 crc kubenswrapper[4706]: I1206 05:23:22.049336 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Dec 06 05:23:22 crc kubenswrapper[4706]: I1206 05:23:22.049469 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 06 05:23:22 crc kubenswrapper[4706]: I1206 05:23:22.166341 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/bd90d5c4-d32b-418b-9cb5-b532c9700699-var-lock\") pod \"installer-9-crc\" (UID: \"bd90d5c4-d32b-418b-9cb5-b532c9700699\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 06 05:23:22 crc kubenswrapper[4706]: I1206 05:23:22.166478 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bd90d5c4-d32b-418b-9cb5-b532c9700699-kubelet-dir\") pod \"installer-9-crc\" (UID: \"bd90d5c4-d32b-418b-9cb5-b532c9700699\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 06 05:23:22 crc kubenswrapper[4706]: I1206 05:23:22.166622 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bd90d5c4-d32b-418b-9cb5-b532c9700699-kube-api-access\") pod \"installer-9-crc\" (UID: \"bd90d5c4-d32b-418b-9cb5-b532c9700699\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 06 05:23:22 crc kubenswrapper[4706]: I1206 05:23:22.268184 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/bd90d5c4-d32b-418b-9cb5-b532c9700699-var-lock\") pod \"installer-9-crc\" (UID: \"bd90d5c4-d32b-418b-9cb5-b532c9700699\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 06 05:23:22 crc kubenswrapper[4706]: I1206 05:23:22.268283 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bd90d5c4-d32b-418b-9cb5-b532c9700699-kubelet-dir\") pod \"installer-9-crc\" (UID: \"bd90d5c4-d32b-418b-9cb5-b532c9700699\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 06 05:23:22 crc kubenswrapper[4706]: I1206 05:23:22.268358 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bd90d5c4-d32b-418b-9cb5-b532c9700699-kube-api-access\") pod \"installer-9-crc\" (UID: \"bd90d5c4-d32b-418b-9cb5-b532c9700699\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 06 05:23:22 crc kubenswrapper[4706]: I1206 05:23:22.268389 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/bd90d5c4-d32b-418b-9cb5-b532c9700699-var-lock\") pod \"installer-9-crc\" (UID: \"bd90d5c4-d32b-418b-9cb5-b532c9700699\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 06 05:23:22 crc kubenswrapper[4706]: I1206 05:23:22.268524 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bd90d5c4-d32b-418b-9cb5-b532c9700699-kubelet-dir\") pod \"installer-9-crc\" (UID: \"bd90d5c4-d32b-418b-9cb5-b532c9700699\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 06 05:23:22 crc kubenswrapper[4706]: I1206 05:23:22.302140 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bd90d5c4-d32b-418b-9cb5-b532c9700699-kube-api-access\") pod \"installer-9-crc\" (UID: \"bd90d5c4-d32b-418b-9cb5-b532c9700699\") " pod="openshift-kube-apiserver/installer-9-crc" Dec 06 05:23:22 crc kubenswrapper[4706]: I1206 05:23:22.382536 4706 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 06 05:23:35 crc kubenswrapper[4706]: I1206 05:23:35.961404 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:23:35 crc kubenswrapper[4706]: I1206 05:23:35.961759 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:23:35 crc kubenswrapper[4706]: I1206 05:23:35.961812 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:23:35 crc kubenswrapper[4706]: I1206 05:23:35.962489 4706 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e"} pod="openshift-machine-config-operator/machine-config-daemon-z27rn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 06 05:23:35 crc kubenswrapper[4706]: I1206 05:23:35.962608 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" containerID="cri-o://f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e" gracePeriod=600 Dec 06 05:23:40 crc kubenswrapper[4706]: I1206 05:23:40.858474 4706 generic.go:334] "Generic (PLEG): container finished" podID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerID="f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e" exitCode=0 Dec 06 05:23:40 crc kubenswrapper[4706]: I1206 05:23:40.858586 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerDied","Data":"f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e"} Dec 06 05:23:45 crc kubenswrapper[4706]: E1206 05:23:45.068657 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:fcd9cdaeec4d21f010a2bb25043386ef71e3c6ca9c62aaf284b705dd309b1475: Get \"https://registry.redhat.io/v2/redhat/certified-operator-index/blobs/sha256:fcd9cdaeec4d21f010a2bb25043386ef71e3c6ca9c62aaf284b705dd309b1475\": context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Dec 06 05:23:45 crc kubenswrapper[4706]: E1206 05:23:45.069646 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
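
The machine-config-daemon sequence above shows the liveness-failure path: the failed probe marks the container unhealthy, and the kubelet kills it with a grace period ("Killing container with a grace period ... gracePeriod=600"), delivering SIGTERM first and SIGKILL only if the deadline expires. A minimal sketch of the process side, assuming the workload simply wants to exit cleanly when SIGTERM arrives:

package main

import (
	"context"
	"fmt"
	"os/signal"
	"syscall"
)

func main() {
	// The kubelet sends SIGTERM first; SIGKILL follows only after the
	// grace period (600s for machine-config-daemon above) runs out.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM)
	defer stop()
	<-ctx.Done()
	fmt.Println("SIGTERM received, shutting down before the grace period ends")
}
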
Dec 06 05:23:45 crc kubenswrapper[4706]: E1206 05:23:45.070984 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:fcd9cdaeec4d21f010a2bb25043386ef71e3c6ca9c62aaf284b705dd309b1475: Get \\\"https://registry.redhat.io/v2/redhat/certified-operator-index/blobs/sha256:fcd9cdaeec4d21f010a2bb25043386ef71e3c6ca9c62aaf284b705dd309b1475\\\": context canceled\"" pod="openshift-marketplace/certified-operators-svf4w" podUID="514a779d-1633-49f5-a991-5a80d8714c19"
Dec 06 05:23:57 crc kubenswrapper[4706]: E1206 05:23:57.971857 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Dec 06 05:23:57 crc kubenswrapper[4706]: E1206 05:23:57.972447 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wkbvx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-9hq8t_openshift-marketplace(fac9e7cf-4919-4a48-b314-f9b985397e7e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 06 05:23:57 crc kubenswrapper[4706]: E1206 05:23:57.973544 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-9hq8t" podUID="fac9e7cf-4919-4a48-b314-f9b985397e7e"
Dec 06 05:23:58 crc kubenswrapper[4706]: E1206 05:23:58.987153 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Dec 06 05:23:58 crc kubenswrapper[4706]: E1206 05:23:58.987602 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-44ghq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-wx94f_openshift-marketplace(404f2b83-1030-4b10-b1cf-c7db67aae01f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 06 05:23:58 crc kubenswrapper[4706]: E1206 05:23:58.988784 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-wx94f" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f"
Dec 06 05:24:02 crc kubenswrapper[4706]: E1206 05:24:02.235229 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-wx94f" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f"
Dec 06 05:24:02 crc kubenswrapper[4706]: E1206 05:24:02.235405 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-9hq8t" podUID="fac9e7cf-4919-4a48-b314-f9b985397e7e"
Dec 06 05:24:02 crc kubenswrapper[4706]: E1206 05:24:02.337305 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Dec 06 05:24:02 crc kubenswrapper[4706]: E1206 05:24:02.337817 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tkt56,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-j4frb_openshift-marketplace(2d21abc6-d736-47df-8eac-4dee0691a92c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 06 05:24:02 crc kubenswrapper[4706]: E1206 05:24:02.339216 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-j4frb" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c"
Dec 06 05:24:05 crc kubenswrapper[4706]: E1206 05:24:05.256551 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-j4frb" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c"
Dec 06 05:24:05 crc kubenswrapper[4706]: E1206 05:24:05.353917 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Dec 06 05:24:05 crc kubenswrapper[4706]: E1206 05:24:05.354167 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-89xk8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-r2clj_openshift-marketplace(52e328e7-19c9-4412-96f0-582cd5add7c5): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 06 05:24:05 crc kubenswrapper[4706]: E1206 05:24:05.355532 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-r2clj" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5"
Dec 06 05:24:05 crc kubenswrapper[4706]: E1206 05:24:05.909380 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Dec 06 05:24:05 crc kubenswrapper[4706]: E1206 05:24:05.909951 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wttgm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-j5lbl_openshift-marketplace(5e6f7aa9-bbf3-4160-9eb8-e7d54c354202): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 06 05:24:05 crc kubenswrapper[4706]: E1206 05:24:05.911264 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-j5lbl" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202"
Dec 06 05:24:06 crc kubenswrapper[4706]: I1206 05:24:06.018834 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"a50b611b00cc5b19681640fa0163c59ec199ee057feb6e3aa5bd246ae8a33948"}
Dec 06 05:24:06 crc kubenswrapper[4706]: E1206 05:24:06.020966 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-j5lbl" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202"
Dec 06 05:24:06 crc kubenswrapper[4706]: I1206 05:24:06.175473 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Dec 06 05:24:06 crc kubenswrapper[4706]: W1206 05:24:06.179267 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-poda84582fb_ebc7_4e13_9d93_9db3cfa2edee.slice/crio-997658db62692a7a7fb58572c530566794d61e7f0b824d391d838d218db6be85 WatchSource:0}: Error finding container 997658db62692a7a7fb58572c530566794d61e7f0b824d391d838d218db6be85: Status 404 returned error can't find the container with id 997658db62692a7a7fb58572c530566794d61e7f0b824d391d838d218db6be85
Dec 06 05:24:06 crc kubenswrapper[4706]: W1206 05:24:06.180451 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podbd90d5c4_d32b_418b_9cb5_b532c9700699.slice/crio-953c2baf899066d0c00034ef643a6f7df23324db8ca37fb57ecbeee040d53ebd WatchSource:0}: Error finding container 953c2baf899066d0c00034ef643a6f7df23324db8ca37fb57ecbeee040d53ebd: Status 404 returned error can't find the container with id 953c2baf899066d0c00034ef643a6f7df23324db8ca37fb57ecbeee040d53ebd
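
The ErrImagePull entries followed by ImagePullBackOff above reflect the kubelet's exponential pull backoff: each failed pull roughly doubles the retry delay up to a cap before the pull is attempted again. A sketch of that doubling-with-cap policy; the 10s initial delay and 5m cap are commonly cited kubelet defaults but are an assumption here, not values read from this log:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed defaults: 10s initial delay, doubling per failure, 5m cap.
	delay, cap := 10*time.Second, 5*time.Minute
	for attempt := 1; attempt <= 7; attempt++ {
		fmt.Printf("pull attempt %d failed; backing off %v\n", attempt, delay)
		if delay *= 2; delay > cap {
			delay = cap
		}
	}
}
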
Dec 06 05:24:06 crc kubenswrapper[4706]: I1206 05:24:06.188122 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Dec 06 05:24:06 crc kubenswrapper[4706]: E1206 05:24:06.336004 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Dec 06 05:24:06 crc kubenswrapper[4706]: E1206 05:24:06.336550 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ngsgc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-dwthx_openshift-marketplace(cf5c1feb-f09b-41c2-9974-56538ccc281f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 06 05:24:06 crc kubenswrapper[4706]: E1206 05:24:06.338231 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-dwthx" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f"
Dec 06 05:24:06 crc kubenswrapper[4706]: E1206 05:24:06.398023 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Dec 06 05:24:06 crc kubenswrapper[4706]: E1206 05:24:06.398486 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-srtrx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-98kq2_openshift-marketplace(2985a55d-3af2-4dd6-adde-7714459e08c3): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Dec 06 05:24:06 crc kubenswrapper[4706]: E1206 05:24:06.399682 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-98kq2" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3"
Dec 06 05:24:07 crc kubenswrapper[4706]: I1206 05:24:07.024856 4706 generic.go:334] "Generic (PLEG): container finished" podID="514a779d-1633-49f5-a991-5a80d8714c19" containerID="a8c7de5499a4dac70c76a1e82816f93d9ebe14a510da0c23a7b15f153bb1f3b5" exitCode=0
Dec 06 05:24:07 crc kubenswrapper[4706]: I1206 05:24:07.024922 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-svf4w" event={"ID":"514a779d-1633-49f5-a991-5a80d8714c19","Type":"ContainerDied","Data":"a8c7de5499a4dac70c76a1e82816f93d9ebe14a510da0c23a7b15f153bb1f3b5"}
Dec 06 05:24:07 crc kubenswrapper[4706]: I1206 05:24:07.028015 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"bd90d5c4-d32b-418b-9cb5-b532c9700699","Type":"ContainerStarted","Data":"b71fc574f1a02e7bb52abe50878fcce79b8631b81fb4017e6ff5b076f43e49d1"}
Dec 06 05:24:07 crc kubenswrapper[4706]: I1206 05:24:07.028115 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"bd90d5c4-d32b-418b-9cb5-b532c9700699","Type":"ContainerStarted","Data":"953c2baf899066d0c00034ef643a6f7df23324db8ca37fb57ecbeee040d53ebd"}
Dec 06 05:24:07 crc kubenswrapper[4706]: I1206 05:24:07.032549 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a84582fb-ebc7-4e13-9d93-9db3cfa2edee","Type":"ContainerStarted","Data":"4b571a0f93c34054a418f25517509a7b6ede6a9a319c96b26d1fd07ae13d12e0"}
Dec 06 05:24:07 crc kubenswrapper[4706]: I1206 05:24:07.032592 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a84582fb-ebc7-4e13-9d93-9db3cfa2edee","Type":"ContainerStarted","Data":"997658db62692a7a7fb58572c530566794d61e7f0b824d391d838d218db6be85"}
Dec 06 05:24:07 crc kubenswrapper[4706]: E1206 05:24:07.034137 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-dwthx" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f"
Dec 06 05:24:07 crc kubenswrapper[4706]: E1206 05:24:07.034466 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-98kq2" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3"
Dec 06 05:24:07 crc kubenswrapper[4706]: I1206 05:24:07.056176 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=50.056157019 podStartE2EDuration="50.056157019s" podCreationTimestamp="2025-12-06 05:23:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:24:07.051162012 +0000 UTC m=+269.378985986" watchObservedRunningTime="2025-12-06 05:24:07.056157019 +0000 UTC m=+269.383980963"
Dec 06 05:24:07 crc kubenswrapper[4706]: I1206 05:24:07.106382 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=45.106359076 podStartE2EDuration="45.106359076s" podCreationTimestamp="2025-12-06 05:23:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:24:07.103456426 +0000 UTC m=+269.431280370" watchObservedRunningTime="2025-12-06 05:24:07.106359076 +0000 UTC m=+269.434183020"
Dec 06 05:24:08 crc kubenswrapper[4706]: I1206 05:24:08.043161 4706 generic.go:334] "Generic (PLEG): container finished" podID="a84582fb-ebc7-4e13-9d93-9db3cfa2edee" containerID="4b571a0f93c34054a418f25517509a7b6ede6a9a319c96b26d1fd07ae13d12e0" exitCode=0
Dec 06 05:24:08 crc kubenswrapper[4706]: I1206 05:24:08.044305 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-svf4w" event={"ID":"514a779d-1633-49f5-a991-5a80d8714c19","Type":"ContainerStarted","Data":"c8ec52a982ab011499bfb9d7b688ca56a6116a636202bb4511ca85ce7bc63a53"}
Dec 06 05:24:08 crc kubenswrapper[4706]: I1206 05:24:08.044333 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a84582fb-ebc7-4e13-9d93-9db3cfa2edee","Type":"ContainerDied","Data":"4b571a0f93c34054a418f25517509a7b6ede6a9a319c96b26d1fd07ae13d12e0"}
Dec 06 05:24:08 crc kubenswrapper[4706]: I1206 05:24:08.085402 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-svf4w" podStartSLOduration=10.858310611 podStartE2EDuration="1m32.085384724s" podCreationTimestamp="2025-12-06 05:22:36 +0000 UTC" firstStartedPulling="2025-12-06 05:22:46.182459167 +0000 UTC m=+188.510283111" lastFinishedPulling="2025-12-06 05:24:07.40953328 +0000 UTC m=+269.737357224" observedRunningTime="2025-12-06 05:24:08.077028955 +0000 UTC m=+270.404852909" watchObservedRunningTime="2025-12-06 05:24:08.085384724 +0000 UTC m=+270.413208668"
Dec 06 05:24:09 crc kubenswrapper[4706]: I1206 05:24:09.367112 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Dec 06 05:24:09 crc kubenswrapper[4706]: I1206 05:24:09.498619 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a84582fb-ebc7-4e13-9d93-9db3cfa2edee-kube-api-access\") pod \"a84582fb-ebc7-4e13-9d93-9db3cfa2edee\" (UID: \"a84582fb-ebc7-4e13-9d93-9db3cfa2edee\") "
Dec 06 05:24:09 crc kubenswrapper[4706]: I1206 05:24:09.498868 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a84582fb-ebc7-4e13-9d93-9db3cfa2edee-kubelet-dir\") pod \"a84582fb-ebc7-4e13-9d93-9db3cfa2edee\" (UID: \"a84582fb-ebc7-4e13-9d93-9db3cfa2edee\") "
Dec 06 05:24:09 crc kubenswrapper[4706]: I1206 05:24:09.499177 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a84582fb-ebc7-4e13-9d93-9db3cfa2edee-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "a84582fb-ebc7-4e13-9d93-9db3cfa2edee" (UID: "a84582fb-ebc7-4e13-9d93-9db3cfa2edee"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 06 05:24:09 crc kubenswrapper[4706]: I1206 05:24:09.499356 4706 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a84582fb-ebc7-4e13-9d93-9db3cfa2edee-kubelet-dir\") on node \"crc\" DevicePath \"\""
Dec 06 05:24:09 crc kubenswrapper[4706]: I1206 05:24:09.504643 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a84582fb-ebc7-4e13-9d93-9db3cfa2edee-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "a84582fb-ebc7-4e13-9d93-9db3cfa2edee" (UID: "a84582fb-ebc7-4e13-9d93-9db3cfa2edee"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 06 05:24:09 crc kubenswrapper[4706]: I1206 05:24:09.600309 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a84582fb-ebc7-4e13-9d93-9db3cfa2edee-kube-api-access\") on node \"crc\" DevicePath \"\""
Dec 06 05:24:10 crc kubenswrapper[4706]: I1206 05:24:10.059285 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a84582fb-ebc7-4e13-9d93-9db3cfa2edee","Type":"ContainerDied","Data":"997658db62692a7a7fb58572c530566794d61e7f0b824d391d838d218db6be85"}
Dec 06 05:24:10 crc kubenswrapper[4706]: I1206 05:24:10.059590 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="997658db62692a7a7fb58572c530566794d61e7f0b824d391d838d218db6be85"
Dec 06 05:24:10 crc kubenswrapper[4706]: I1206 05:24:10.059406 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Dec 06 05:24:15 crc kubenswrapper[4706]: I1206 05:24:15.093068 4706 generic.go:334] "Generic (PLEG): container finished" podID="fac9e7cf-4919-4a48-b314-f9b985397e7e" containerID="b21c91b14d3bea00a6952027d60ea46c36b1bf093e970f4ad1de459fdfaa553e" exitCode=0 Dec 06 05:24:15 crc kubenswrapper[4706]: I1206 05:24:15.093399 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9hq8t" event={"ID":"fac9e7cf-4919-4a48-b314-f9b985397e7e","Type":"ContainerDied","Data":"b21c91b14d3bea00a6952027d60ea46c36b1bf093e970f4ad1de459fdfaa553e"} Dec 06 05:24:16 crc kubenswrapper[4706]: I1206 05:24:16.089139 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:24:16 crc kubenswrapper[4706]: I1206 05:24:16.089256 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:24:16 crc kubenswrapper[4706]: I1206 05:24:16.090955 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Dec 06 05:24:16 crc kubenswrapper[4706]: I1206 05:24:16.091830 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Dec 06 05:24:16 crc kubenswrapper[4706]: I1206 05:24:16.100474 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:24:16 crc kubenswrapper[4706]: I1206 05:24:16.117851 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:24:16 crc kubenswrapper[4706]: I1206 05:24:16.190611 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:24:16 crc kubenswrapper[4706]: I1206 05:24:16.190805 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: 
\"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:24:16 crc kubenswrapper[4706]: I1206 05:24:16.194645 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Dec 06 05:24:16 crc kubenswrapper[4706]: I1206 05:24:16.202995 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Dec 06 05:24:16 crc kubenswrapper[4706]: I1206 05:24:16.215182 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:24:16 crc kubenswrapper[4706]: I1206 05:24:16.217489 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:24:16 crc kubenswrapper[4706]: I1206 05:24:16.253100 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:24:16 crc kubenswrapper[4706]: I1206 05:24:16.262806 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Dec 06 05:24:16 crc kubenswrapper[4706]: I1206 05:24:16.269544 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Dec 06 05:24:16 crc kubenswrapper[4706]: I1206 05:24:16.599085 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-svf4w" Dec 06 05:24:16 crc kubenswrapper[4706]: I1206 05:24:16.599144 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-svf4w" Dec 06 05:24:16 crc kubenswrapper[4706]: I1206 05:24:16.665212 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-svf4w" Dec 06 05:24:16 crc kubenswrapper[4706]: W1206 05:24:16.694313 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-836bb31e511d9f6fd341cc3f230741cd70661fc03957abd786f49e2d0ab348c5 WatchSource:0}: Error finding container 836bb31e511d9f6fd341cc3f230741cd70661fc03957abd786f49e2d0ab348c5: Status 404 returned error can't find the container with id 836bb31e511d9f6fd341cc3f230741cd70661fc03957abd786f49e2d0ab348c5 Dec 06 05:24:16 crc kubenswrapper[4706]: W1206 05:24:16.695239 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-2d831c43c8c2bf24677152fed66e4e79eaf99da60d01152216306d9bd7cc8d53 WatchSource:0}: Error finding container 2d831c43c8c2bf24677152fed66e4e79eaf99da60d01152216306d9bd7cc8d53: Status 404 returned error can't find the container with id 2d831c43c8c2bf24677152fed66e4e79eaf99da60d01152216306d9bd7cc8d53 Dec 06 05:24:16 crc kubenswrapper[4706]: W1206 05:24:16.755475 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-4457b65264082be8ac465fab4c748616166d07ac5fb85fa8418246add75b1550 WatchSource:0}: Error finding container 4457b65264082be8ac465fab4c748616166d07ac5fb85fa8418246add75b1550: Status 404 returned error can't find the container with id 4457b65264082be8ac465fab4c748616166d07ac5fb85fa8418246add75b1550 Dec 06 05:24:17 crc kubenswrapper[4706]: I1206 05:24:17.105722 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"836bb31e511d9f6fd341cc3f230741cd70661fc03957abd786f49e2d0ab348c5"} Dec 06 05:24:17 crc kubenswrapper[4706]: I1206 05:24:17.106641 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"4457b65264082be8ac465fab4c748616166d07ac5fb85fa8418246add75b1550"} Dec 06 05:24:17 crc kubenswrapper[4706]: I1206 05:24:17.107717 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"2d831c43c8c2bf24677152fed66e4e79eaf99da60d01152216306d9bd7cc8d53"} Dec 06 05:24:17 crc kubenswrapper[4706]: I1206 05:24:17.144590 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-svf4w" Dec 06 05:24:18 crc kubenswrapper[4706]: I1206 
05:24:18.113619 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"bea892604dfbd17b95de82aedbcaae1450ee01e106282ebd1c89a6f95eaaafa1"} Dec 06 05:24:18 crc kubenswrapper[4706]: I1206 05:24:18.115028 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"f20bd872abdeaf83c9472d4ff76dbab90f815c3a1d24a81e4b94fdf8fd470ec2"} Dec 06 05:24:18 crc kubenswrapper[4706]: I1206 05:24:18.117323 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"5acbdb988c31eaffe7e5044db8428af13cebd76f7d0e02acd82ae46847455346"} Dec 06 05:24:18 crc kubenswrapper[4706]: I1206 05:24:18.117472 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:24:18 crc kubenswrapper[4706]: I1206 05:24:18.255948 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-svf4w"] Dec 06 05:24:19 crc kubenswrapper[4706]: I1206 05:24:19.124289 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9hq8t" event={"ID":"fac9e7cf-4919-4a48-b314-f9b985397e7e","Type":"ContainerStarted","Data":"eb7ae86cfa3f35e8d8f59d40d8139be27ddbaac3b00af219a688ae33f0f8d778"} Dec 06 05:24:19 crc kubenswrapper[4706]: I1206 05:24:19.125950 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-svf4w" podUID="514a779d-1633-49f5-a991-5a80d8714c19" containerName="registry-server" containerID="cri-o://c8ec52a982ab011499bfb9d7b688ca56a6116a636202bb4511ca85ce7bc63a53" gracePeriod=2 Dec 06 05:24:19 crc kubenswrapper[4706]: I1206 05:24:19.145793 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9hq8t" podStartSLOduration=11.638283354 podStartE2EDuration="1m43.145759591s" podCreationTimestamp="2025-12-06 05:22:36 +0000 UTC" firstStartedPulling="2025-12-06 05:22:46.19960487 +0000 UTC m=+188.527428814" lastFinishedPulling="2025-12-06 05:24:17.707081097 +0000 UTC m=+280.034905051" observedRunningTime="2025-12-06 05:24:19.143444487 +0000 UTC m=+281.471268441" watchObservedRunningTime="2025-12-06 05:24:19.145759591 +0000 UTC m=+281.473583535" Dec 06 05:24:20 crc kubenswrapper[4706]: I1206 05:24:20.130288 4706 generic.go:334] "Generic (PLEG): container finished" podID="514a779d-1633-49f5-a991-5a80d8714c19" containerID="c8ec52a982ab011499bfb9d7b688ca56a6116a636202bb4511ca85ce7bc63a53" exitCode=0 Dec 06 05:24:20 crc kubenswrapper[4706]: I1206 05:24:20.130364 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-svf4w" event={"ID":"514a779d-1633-49f5-a991-5a80d8714c19","Type":"ContainerDied","Data":"c8ec52a982ab011499bfb9d7b688ca56a6116a636202bb4511ca85ce7bc63a53"} Dec 06 05:24:26 crc kubenswrapper[4706]: I1206 05:24:26.519424 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9hq8t" Dec 06 05:24:26 crc kubenswrapper[4706]: I1206 05:24:26.519860 4706 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/community-operators-9hq8t" Dec 06 05:24:26 crc kubenswrapper[4706]: I1206 05:24:26.567294 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9hq8t" Dec 06 05:24:26 crc kubenswrapper[4706]: E1206 05:24:26.601354 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c8ec52a982ab011499bfb9d7b688ca56a6116a636202bb4511ca85ce7bc63a53 is running failed: container process not found" containerID="c8ec52a982ab011499bfb9d7b688ca56a6116a636202bb4511ca85ce7bc63a53" cmd=["grpc_health_probe","-addr=:50051"] Dec 06 05:24:26 crc kubenswrapper[4706]: E1206 05:24:26.602775 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c8ec52a982ab011499bfb9d7b688ca56a6116a636202bb4511ca85ce7bc63a53 is running failed: container process not found" containerID="c8ec52a982ab011499bfb9d7b688ca56a6116a636202bb4511ca85ce7bc63a53" cmd=["grpc_health_probe","-addr=:50051"] Dec 06 05:24:26 crc kubenswrapper[4706]: E1206 05:24:26.603192 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c8ec52a982ab011499bfb9d7b688ca56a6116a636202bb4511ca85ce7bc63a53 is running failed: container process not found" containerID="c8ec52a982ab011499bfb9d7b688ca56a6116a636202bb4511ca85ce7bc63a53" cmd=["grpc_health_probe","-addr=:50051"] Dec 06 05:24:26 crc kubenswrapper[4706]: E1206 05:24:26.603339 4706 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of c8ec52a982ab011499bfb9d7b688ca56a6116a636202bb4511ca85ce7bc63a53 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-svf4w" podUID="514a779d-1633-49f5-a991-5a80d8714c19" containerName="registry-server" Dec 06 05:24:27 crc kubenswrapper[4706]: I1206 05:24:27.205911 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9hq8t" Dec 06 05:24:27 crc kubenswrapper[4706]: I1206 05:24:27.860527 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9hq8t"] Dec 06 05:24:29 crc kubenswrapper[4706]: I1206 05:24:29.177615 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-9hq8t" podUID="fac9e7cf-4919-4a48-b314-f9b985397e7e" containerName="registry-server" containerID="cri-o://eb7ae86cfa3f35e8d8f59d40d8139be27ddbaac3b00af219a688ae33f0f8d778" gracePeriod=2 Dec 06 05:24:32 crc kubenswrapper[4706]: I1206 05:24:32.199129 4706 generic.go:334] "Generic (PLEG): container finished" podID="fac9e7cf-4919-4a48-b314-f9b985397e7e" containerID="eb7ae86cfa3f35e8d8f59d40d8139be27ddbaac3b00af219a688ae33f0f8d778" exitCode=0 Dec 06 05:24:32 crc kubenswrapper[4706]: I1206 05:24:32.199230 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9hq8t" event={"ID":"fac9e7cf-4919-4a48-b314-f9b985397e7e","Type":"ContainerDied","Data":"eb7ae86cfa3f35e8d8f59d40d8139be27ddbaac3b00af219a688ae33f0f8d778"} Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.739346 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-svf4w" Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.744060 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9hq8t" Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.869430 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/514a779d-1633-49f5-a991-5a80d8714c19-catalog-content\") pod \"514a779d-1633-49f5-a991-5a80d8714c19\" (UID: \"514a779d-1633-49f5-a991-5a80d8714c19\") " Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.869491 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnjm6\" (UniqueName: \"kubernetes.io/projected/514a779d-1633-49f5-a991-5a80d8714c19-kube-api-access-rnjm6\") pod \"514a779d-1633-49f5-a991-5a80d8714c19\" (UID: \"514a779d-1633-49f5-a991-5a80d8714c19\") " Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.869576 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fac9e7cf-4919-4a48-b314-f9b985397e7e-utilities\") pod \"fac9e7cf-4919-4a48-b314-f9b985397e7e\" (UID: \"fac9e7cf-4919-4a48-b314-f9b985397e7e\") " Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.869604 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wkbvx\" (UniqueName: \"kubernetes.io/projected/fac9e7cf-4919-4a48-b314-f9b985397e7e-kube-api-access-wkbvx\") pod \"fac9e7cf-4919-4a48-b314-f9b985397e7e\" (UID: \"fac9e7cf-4919-4a48-b314-f9b985397e7e\") " Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.869633 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/514a779d-1633-49f5-a991-5a80d8714c19-utilities\") pod \"514a779d-1633-49f5-a991-5a80d8714c19\" (UID: \"514a779d-1633-49f5-a991-5a80d8714c19\") " Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.869656 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fac9e7cf-4919-4a48-b314-f9b985397e7e-catalog-content\") pod \"fac9e7cf-4919-4a48-b314-f9b985397e7e\" (UID: \"fac9e7cf-4919-4a48-b314-f9b985397e7e\") " Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.871018 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/514a779d-1633-49f5-a991-5a80d8714c19-utilities" (OuterVolumeSpecName: "utilities") pod "514a779d-1633-49f5-a991-5a80d8714c19" (UID: "514a779d-1633-49f5-a991-5a80d8714c19"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.871818 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fac9e7cf-4919-4a48-b314-f9b985397e7e-utilities" (OuterVolumeSpecName: "utilities") pod "fac9e7cf-4919-4a48-b314-f9b985397e7e" (UID: "fac9e7cf-4919-4a48-b314-f9b985397e7e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.875394 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/514a779d-1633-49f5-a991-5a80d8714c19-kube-api-access-rnjm6" (OuterVolumeSpecName: "kube-api-access-rnjm6") pod "514a779d-1633-49f5-a991-5a80d8714c19" (UID: "514a779d-1633-49f5-a991-5a80d8714c19"). InnerVolumeSpecName "kube-api-access-rnjm6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.877801 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fac9e7cf-4919-4a48-b314-f9b985397e7e-kube-api-access-wkbvx" (OuterVolumeSpecName: "kube-api-access-wkbvx") pod "fac9e7cf-4919-4a48-b314-f9b985397e7e" (UID: "fac9e7cf-4919-4a48-b314-f9b985397e7e"). InnerVolumeSpecName "kube-api-access-wkbvx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.921537 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/514a779d-1633-49f5-a991-5a80d8714c19-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "514a779d-1633-49f5-a991-5a80d8714c19" (UID: "514a779d-1633-49f5-a991-5a80d8714c19"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.922461 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fac9e7cf-4919-4a48-b314-f9b985397e7e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fac9e7cf-4919-4a48-b314-f9b985397e7e" (UID: "fac9e7cf-4919-4a48-b314-f9b985397e7e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.971679 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fac9e7cf-4919-4a48-b314-f9b985397e7e-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.971717 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wkbvx\" (UniqueName: \"kubernetes.io/projected/fac9e7cf-4919-4a48-b314-f9b985397e7e-kube-api-access-wkbvx\") on node \"crc\" DevicePath \"\"" Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.971732 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/514a779d-1633-49f5-a991-5a80d8714c19-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.971744 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fac9e7cf-4919-4a48-b314-f9b985397e7e-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.971755 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/514a779d-1633-49f5-a991-5a80d8714c19-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:24:35 crc kubenswrapper[4706]: I1206 05:24:35.971767 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnjm6\" (UniqueName: \"kubernetes.io/projected/514a779d-1633-49f5-a991-5a80d8714c19-kube-api-access-rnjm6\") on node \"crc\" DevicePath \"\"" Dec 06 05:24:36 crc kubenswrapper[4706]: I1206 05:24:36.227276 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9hq8t" event={"ID":"fac9e7cf-4919-4a48-b314-f9b985397e7e","Type":"ContainerDied","Data":"7e4d892a7209523964ebbed9009156c22bcf9790f50d3447d7d3a433d4fdc750"} Dec 06 05:24:36 crc kubenswrapper[4706]: I1206 05:24:36.227346 4706 scope.go:117] "RemoveContainer" containerID="eb7ae86cfa3f35e8d8f59d40d8139be27ddbaac3b00af219a688ae33f0f8d778" Dec 06 05:24:36 crc kubenswrapper[4706]: I1206 05:24:36.229201 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9hq8t" Dec 06 05:24:36 crc kubenswrapper[4706]: I1206 05:24:36.230958 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-svf4w" event={"ID":"514a779d-1633-49f5-a991-5a80d8714c19","Type":"ContainerDied","Data":"856b38db7af56babe4f10b056895e61e898422e885e971b94b226a4d3bb9745c"} Dec 06 05:24:36 crc kubenswrapper[4706]: I1206 05:24:36.231166 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-svf4w" Dec 06 05:24:36 crc kubenswrapper[4706]: I1206 05:24:36.251459 4706 scope.go:117] "RemoveContainer" containerID="b21c91b14d3bea00a6952027d60ea46c36b1bf093e970f4ad1de459fdfaa553e" Dec 06 05:24:36 crc kubenswrapper[4706]: I1206 05:24:36.275730 4706 scope.go:117] "RemoveContainer" containerID="fbad4d63d909c3eabeb0e96bb2186b8179ad7955dcb5456472c41c48a1587f58" Dec 06 05:24:36 crc kubenswrapper[4706]: I1206 05:24:36.283413 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9hq8t"] Dec 06 05:24:36 crc kubenswrapper[4706]: I1206 05:24:36.289789 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-9hq8t"] Dec 06 05:24:36 crc kubenswrapper[4706]: I1206 05:24:36.296834 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-svf4w"] Dec 06 05:24:36 crc kubenswrapper[4706]: I1206 05:24:36.302316 4706 scope.go:117] "RemoveContainer" containerID="c8ec52a982ab011499bfb9d7b688ca56a6116a636202bb4511ca85ce7bc63a53" Dec 06 05:24:36 crc kubenswrapper[4706]: I1206 05:24:36.303300 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-svf4w"] Dec 06 05:24:36 crc kubenswrapper[4706]: I1206 05:24:36.326033 4706 scope.go:117] "RemoveContainer" containerID="a8c7de5499a4dac70c76a1e82816f93d9ebe14a510da0c23a7b15f153bb1f3b5" Dec 06 05:24:36 crc kubenswrapper[4706]: I1206 05:24:36.353121 4706 scope.go:117] "RemoveContainer" containerID="1db8644cab93daaae7251ee05a3472b05944a76e27862c608542d0e759043e06" Dec 06 05:24:37 crc kubenswrapper[4706]: I1206 05:24:37.240944 4706 generic.go:334] "Generic (PLEG): container finished" podID="404f2b83-1030-4b10-b1cf-c7db67aae01f" containerID="b077826243b8e814c0f7086499e2748c7ab02c6fbd44d144f33c161fc220bb3c" exitCode=0 Dec 06 05:24:37 crc kubenswrapper[4706]: I1206 05:24:37.241021 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wx94f" event={"ID":"404f2b83-1030-4b10-b1cf-c7db67aae01f","Type":"ContainerDied","Data":"b077826243b8e814c0f7086499e2748c7ab02c6fbd44d144f33c161fc220bb3c"} Dec 06 05:24:37 crc kubenswrapper[4706]: I1206 05:24:37.396257 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-lzm5j"] Dec 06 05:24:38 crc kubenswrapper[4706]: I1206 05:24:38.049974 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="514a779d-1633-49f5-a991-5a80d8714c19" path="/var/lib/kubelet/pods/514a779d-1633-49f5-a991-5a80d8714c19/volumes" Dec 06 05:24:38 crc kubenswrapper[4706]: I1206 05:24:38.050941 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fac9e7cf-4919-4a48-b314-f9b985397e7e" path="/var/lib/kubelet/pods/fac9e7cf-4919-4a48-b314-f9b985397e7e/volumes" Dec 06 05:24:38 crc kubenswrapper[4706]: I1206 05:24:38.249798 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2clj" event={"ID":"52e328e7-19c9-4412-96f0-582cd5add7c5","Type":"ContainerStarted","Data":"ea614a3bb659c8af872ae774c19993e551d90992b3e281ddcf9f6598e67abf98"} Dec 06 05:24:38 crc kubenswrapper[4706]: I1206 05:24:38.254006 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j4frb" 
event={"ID":"2d21abc6-d736-47df-8eac-4dee0691a92c","Type":"ContainerStarted","Data":"ede8213789bca7ed5e6b8ac6b60825693d5673d776f627ce68e07b14b0fde798"} Dec 06 05:24:38 crc kubenswrapper[4706]: I1206 05:24:38.256206 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j5lbl" event={"ID":"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202","Type":"ContainerStarted","Data":"2f5be58ab034da25a5147e99c9c473ae721aaeb2eef91abe3ec0134ece8468a8"} Dec 06 05:24:38 crc kubenswrapper[4706]: I1206 05:24:38.258107 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-98kq2" event={"ID":"2985a55d-3af2-4dd6-adde-7714459e08c3","Type":"ContainerStarted","Data":"b49e3047ac6356c1753cb6d45b6887afb73cb85ac18544ae0f0657b8caa04cbd"} Dec 06 05:24:38 crc kubenswrapper[4706]: I1206 05:24:38.259842 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwthx" event={"ID":"cf5c1feb-f09b-41c2-9974-56538ccc281f","Type":"ContainerStarted","Data":"db2cb9c9f79e99b736443dd2bcbab250397fb94041572c0a35cc366776112e58"} Dec 06 05:24:39 crc kubenswrapper[4706]: I1206 05:24:39.268911 4706 generic.go:334] "Generic (PLEG): container finished" podID="cf5c1feb-f09b-41c2-9974-56538ccc281f" containerID="db2cb9c9f79e99b736443dd2bcbab250397fb94041572c0a35cc366776112e58" exitCode=0 Dec 06 05:24:39 crc kubenswrapper[4706]: I1206 05:24:39.268979 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwthx" event={"ID":"cf5c1feb-f09b-41c2-9974-56538ccc281f","Type":"ContainerDied","Data":"db2cb9c9f79e99b736443dd2bcbab250397fb94041572c0a35cc366776112e58"} Dec 06 05:24:39 crc kubenswrapper[4706]: I1206 05:24:39.280571 4706 generic.go:334] "Generic (PLEG): container finished" podID="52e328e7-19c9-4412-96f0-582cd5add7c5" containerID="ea614a3bb659c8af872ae774c19993e551d90992b3e281ddcf9f6598e67abf98" exitCode=0 Dec 06 05:24:39 crc kubenswrapper[4706]: I1206 05:24:39.280628 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2clj" event={"ID":"52e328e7-19c9-4412-96f0-582cd5add7c5","Type":"ContainerDied","Data":"ea614a3bb659c8af872ae774c19993e551d90992b3e281ddcf9f6598e67abf98"} Dec 06 05:24:39 crc kubenswrapper[4706]: I1206 05:24:39.285727 4706 generic.go:334] "Generic (PLEG): container finished" podID="2d21abc6-d736-47df-8eac-4dee0691a92c" containerID="ede8213789bca7ed5e6b8ac6b60825693d5673d776f627ce68e07b14b0fde798" exitCode=0 Dec 06 05:24:39 crc kubenswrapper[4706]: I1206 05:24:39.285805 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j4frb" event={"ID":"2d21abc6-d736-47df-8eac-4dee0691a92c","Type":"ContainerDied","Data":"ede8213789bca7ed5e6b8ac6b60825693d5673d776f627ce68e07b14b0fde798"} Dec 06 05:24:39 crc kubenswrapper[4706]: I1206 05:24:39.296526 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wx94f" event={"ID":"404f2b83-1030-4b10-b1cf-c7db67aae01f","Type":"ContainerStarted","Data":"317775150df92de25f45a4135dbf96e2c76ac1d007b9491475c05d59a00cd055"} Dec 06 05:24:39 crc kubenswrapper[4706]: I1206 05:24:39.299995 4706 generic.go:334] "Generic (PLEG): container finished" podID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" containerID="2f5be58ab034da25a5147e99c9c473ae721aaeb2eef91abe3ec0134ece8468a8" exitCode=0 Dec 06 05:24:39 crc kubenswrapper[4706]: I1206 05:24:39.300093 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-j5lbl" event={"ID":"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202","Type":"ContainerDied","Data":"2f5be58ab034da25a5147e99c9c473ae721aaeb2eef91abe3ec0134ece8468a8"} Dec 06 05:24:39 crc kubenswrapper[4706]: I1206 05:24:39.306409 4706 generic.go:334] "Generic (PLEG): container finished" podID="2985a55d-3af2-4dd6-adde-7714459e08c3" containerID="b49e3047ac6356c1753cb6d45b6887afb73cb85ac18544ae0f0657b8caa04cbd" exitCode=0 Dec 06 05:24:39 crc kubenswrapper[4706]: I1206 05:24:39.306458 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-98kq2" event={"ID":"2985a55d-3af2-4dd6-adde-7714459e08c3","Type":"ContainerDied","Data":"b49e3047ac6356c1753cb6d45b6887afb73cb85ac18544ae0f0657b8caa04cbd"} Dec 06 05:24:39 crc kubenswrapper[4706]: I1206 05:24:39.359828 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wx94f" podStartSLOduration=9.829897886 podStartE2EDuration="2m2.359810485s" podCreationTimestamp="2025-12-06 05:22:37 +0000 UTC" firstStartedPulling="2025-12-06 05:22:46.149718343 +0000 UTC m=+188.477542287" lastFinishedPulling="2025-12-06 05:24:38.679630902 +0000 UTC m=+301.007454886" observedRunningTime="2025-12-06 05:24:39.358329805 +0000 UTC m=+301.686153769" watchObservedRunningTime="2025-12-06 05:24:39.359810485 +0000 UTC m=+301.687634429" Dec 06 05:24:40 crc kubenswrapper[4706]: I1206 05:24:40.314974 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j4frb" event={"ID":"2d21abc6-d736-47df-8eac-4dee0691a92c","Type":"ContainerStarted","Data":"e2ad0d4599c20f803d7d4b64bccc309fbc3cbb9b90088169460a2106e2d0a5dd"} Dec 06 05:24:40 crc kubenswrapper[4706]: I1206 05:24:40.322868 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-98kq2" event={"ID":"2985a55d-3af2-4dd6-adde-7714459e08c3","Type":"ContainerStarted","Data":"92d79e0671426b72ca23e1771655166757ed0b3fd2ef3489f8b34cc1c211998c"} Dec 06 05:24:40 crc kubenswrapper[4706]: I1206 05:24:40.326223 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2clj" event={"ID":"52e328e7-19c9-4412-96f0-582cd5add7c5","Type":"ContainerStarted","Data":"5698dbacd7c2111e8f308ce87325b1cc62f9aaa78b83be6593dfc129551b2c10"} Dec 06 05:24:40 crc kubenswrapper[4706]: I1206 05:24:40.333728 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-j4frb" podStartSLOduration=9.513311013 podStartE2EDuration="2m2.333713343s" podCreationTimestamp="2025-12-06 05:22:38 +0000 UTC" firstStartedPulling="2025-12-06 05:22:47.224885364 +0000 UTC m=+189.552709308" lastFinishedPulling="2025-12-06 05:24:40.045287684 +0000 UTC m=+302.373111638" observedRunningTime="2025-12-06 05:24:40.333521978 +0000 UTC m=+302.661345932" watchObservedRunningTime="2025-12-06 05:24:40.333713343 +0000 UTC m=+302.661537287" Dec 06 05:24:40 crc kubenswrapper[4706]: I1206 05:24:40.351402 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-r2clj" podStartSLOduration=9.654419808 podStartE2EDuration="2m1.351378427s" podCreationTimestamp="2025-12-06 05:22:39 +0000 UTC" firstStartedPulling="2025-12-06 05:22:48.260926348 +0000 UTC m=+190.588750292" lastFinishedPulling="2025-12-06 05:24:39.957884927 +0000 UTC m=+302.285708911" observedRunningTime="2025-12-06 05:24:40.350567375 +0000 UTC m=+302.678391319" 
watchObservedRunningTime="2025-12-06 05:24:40.351378427 +0000 UTC m=+302.679202381" Dec 06 05:24:40 crc kubenswrapper[4706]: I1206 05:24:40.370537 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-98kq2" podStartSLOduration=11.59223432 podStartE2EDuration="2m5.370516782s" podCreationTimestamp="2025-12-06 05:22:35 +0000 UTC" firstStartedPulling="2025-12-06 05:22:46.213186396 +0000 UTC m=+188.541010350" lastFinishedPulling="2025-12-06 05:24:39.991468848 +0000 UTC m=+302.319292812" observedRunningTime="2025-12-06 05:24:40.369637679 +0000 UTC m=+302.697461633" watchObservedRunningTime="2025-12-06 05:24:40.370516782 +0000 UTC m=+302.698340726" Dec 06 05:24:41 crc kubenswrapper[4706]: I1206 05:24:41.333793 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j5lbl" event={"ID":"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202","Type":"ContainerStarted","Data":"26d6a521b2dc1b5558f348b152c395fab61190bb0ba46667df845db0b0645aba"} Dec 06 05:24:41 crc kubenswrapper[4706]: I1206 05:24:41.337434 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwthx" event={"ID":"cf5c1feb-f09b-41c2-9974-56538ccc281f","Type":"ContainerStarted","Data":"f4ac9748d2b4ef06e2f325acd0ada24c069adcd75625e4a6d506e9752d27010c"} Dec 06 05:24:41 crc kubenswrapper[4706]: I1206 05:24:41.350840 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-j5lbl" podStartSLOduration=11.255623327 podStartE2EDuration="2m5.350820256s" podCreationTimestamp="2025-12-06 05:22:36 +0000 UTC" firstStartedPulling="2025-12-06 05:22:46.159359694 +0000 UTC m=+188.487183638" lastFinishedPulling="2025-12-06 05:24:40.254556613 +0000 UTC m=+302.582380567" observedRunningTime="2025-12-06 05:24:41.349999764 +0000 UTC m=+303.677823718" watchObservedRunningTime="2025-12-06 05:24:41.350820256 +0000 UTC m=+303.678644200" Dec 06 05:24:41 crc kubenswrapper[4706]: I1206 05:24:41.369239 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dwthx" podStartSLOduration=9.964184746 podStartE2EDuration="2m3.369222021s" podCreationTimestamp="2025-12-06 05:22:38 +0000 UTC" firstStartedPulling="2025-12-06 05:22:47.227685239 +0000 UTC m=+189.555509193" lastFinishedPulling="2025-12-06 05:24:40.632722524 +0000 UTC m=+302.960546468" observedRunningTime="2025-12-06 05:24:41.368552722 +0000 UTC m=+303.696376666" watchObservedRunningTime="2025-12-06 05:24:41.369222021 +0000 UTC m=+303.697045965" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.293393 4706 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 06 05:24:44 crc kubenswrapper[4706]: E1206 05:24:44.294242 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="514a779d-1633-49f5-a991-5a80d8714c19" containerName="registry-server" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.294262 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="514a779d-1633-49f5-a991-5a80d8714c19" containerName="registry-server" Dec 06 05:24:44 crc kubenswrapper[4706]: E1206 05:24:44.294283 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fac9e7cf-4919-4a48-b314-f9b985397e7e" containerName="extract-utilities" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.294291 4706 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="fac9e7cf-4919-4a48-b314-f9b985397e7e" containerName="extract-utilities" Dec 06 05:24:44 crc kubenswrapper[4706]: E1206 05:24:44.294306 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a84582fb-ebc7-4e13-9d93-9db3cfa2edee" containerName="pruner" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.294314 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a84582fb-ebc7-4e13-9d93-9db3cfa2edee" containerName="pruner" Dec 06 05:24:44 crc kubenswrapper[4706]: E1206 05:24:44.294332 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fac9e7cf-4919-4a48-b314-f9b985397e7e" containerName="extract-content" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.294339 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="fac9e7cf-4919-4a48-b314-f9b985397e7e" containerName="extract-content" Dec 06 05:24:44 crc kubenswrapper[4706]: E1206 05:24:44.294348 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="514a779d-1633-49f5-a991-5a80d8714c19" containerName="extract-utilities" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.294355 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="514a779d-1633-49f5-a991-5a80d8714c19" containerName="extract-utilities" Dec 06 05:24:44 crc kubenswrapper[4706]: E1206 05:24:44.294363 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fac9e7cf-4919-4a48-b314-f9b985397e7e" containerName="registry-server" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.294371 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="fac9e7cf-4919-4a48-b314-f9b985397e7e" containerName="registry-server" Dec 06 05:24:44 crc kubenswrapper[4706]: E1206 05:24:44.294380 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="514a779d-1633-49f5-a991-5a80d8714c19" containerName="extract-content" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.294387 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="514a779d-1633-49f5-a991-5a80d8714c19" containerName="extract-content" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.294536 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a84582fb-ebc7-4e13-9d93-9db3cfa2edee" containerName="pruner" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.294551 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="fac9e7cf-4919-4a48-b314-f9b985397e7e" containerName="registry-server" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.294559 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="514a779d-1633-49f5-a991-5a80d8714c19" containerName="registry-server" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.295204 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.296736 4706 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 06 05:24:44 crc kubenswrapper[4706]: E1206 05:24:44.296745 4706 file.go:109] "Unable to process watch event" err="can't process config file \"/etc/kubernetes/manifests/kube-apiserver-pod.yaml\": /etc/kubernetes/manifests/kube-apiserver-pod.yaml: couldn't parse as pod(Object 'Kind' is missing in 'null'), please check config file" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.297182 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24" gracePeriod=15 Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.297245 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f" gracePeriod=15 Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.297275 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad" gracePeriod=15 Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.297246 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6" gracePeriod=15 Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.297362 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f" gracePeriod=15 Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.298405 4706 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Dec 06 05:24:44 crc kubenswrapper[4706]: E1206 05:24:44.298609 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.298629 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 06 05:24:44 crc kubenswrapper[4706]: E1206 05:24:44.298643 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.298652 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 06 05:24:44 crc kubenswrapper[4706]: E1206 05:24:44.298662 4706 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.298670 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Dec 06 05:24:44 crc kubenswrapper[4706]: E1206 05:24:44.298678 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.298687 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 06 05:24:44 crc kubenswrapper[4706]: E1206 05:24:44.298696 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.298705 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 06 05:24:44 crc kubenswrapper[4706]: E1206 05:24:44.298717 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.298725 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 06 05:24:44 crc kubenswrapper[4706]: E1206 05:24:44.298739 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.298747 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.298895 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.298912 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.298929 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.298942 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.298952 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.299248 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.314386 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod 
\"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.314463 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.314542 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.314579 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.314643 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.314705 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.314749 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.314811 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.349064 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.415745 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " 
pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.415791 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.415826 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.415858 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.415885 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.415889 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.415920 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.415951 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.415953 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.415986 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 06 05:24:44 crc 
kubenswrapper[4706]: I1206 05:24:44.415967 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.416007 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.416040 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.416131 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.415979 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.416215 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.645459 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 06 05:24:44 crc kubenswrapper[4706]: W1206 05:24:44.670427 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-666f3614e35f3102b912312b901b1bcf43a60d3bf7095315787ac5abe53833f4 WatchSource:0}: Error finding container 666f3614e35f3102b912312b901b1bcf43a60d3bf7095315787ac5abe53833f4: Status 404 returned error can't find the container with id 666f3614e35f3102b912312b901b1bcf43a60d3bf7095315787ac5abe53833f4 Dec 06 05:24:44 crc kubenswrapper[4706]: E1206 05:24:44.675307 4706 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.23:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187e88f3ccfff460 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-06 05:24:44.673569888 +0000 UTC m=+307.001393872,LastTimestamp:2025-12-06 05:24:44.673569888 +0000 UTC m=+307.001393872,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.713657 4706 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:6443/readyz\": dial tcp 192.168.126.11:6443: connect: connection refused" start-of-body= Dec 06 05:24:44 crc kubenswrapper[4706]: I1206 05:24:44.713739 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="Get \"https://192.168.126.11:6443/readyz\": dial tcp 192.168.126.11:6443: connect: connection refused" Dec 06 05:24:45 crc kubenswrapper[4706]: I1206 05:24:45.394136 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"666f3614e35f3102b912312b901b1bcf43a60d3bf7095315787ac5abe53833f4"} Dec 06 05:24:45 crc kubenswrapper[4706]: I1206 05:24:45.398109 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 06 05:24:45 crc kubenswrapper[4706]: I1206 05:24:45.399909 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 06 05:24:45 crc kubenswrapper[4706]: I1206 05:24:45.400645 4706 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" 
containerID="0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f" exitCode=2 Dec 06 05:24:45 crc kubenswrapper[4706]: I1206 05:24:45.998078 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-98kq2" Dec 06 05:24:45 crc kubenswrapper[4706]: I1206 05:24:45.998120 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-98kq2" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.047899 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-98kq2" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.048479 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.048862 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.416908 4706 generic.go:334] "Generic (PLEG): container finished" podID="bd90d5c4-d32b-418b-9cb5-b532c9700699" containerID="b71fc574f1a02e7bb52abe50878fcce79b8631b81fb4017e6ff5b076f43e49d1" exitCode=0 Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.417100 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"bd90d5c4-d32b-418b-9cb5-b532c9700699","Type":"ContainerDied","Data":"b71fc574f1a02e7bb52abe50878fcce79b8631b81fb4017e6ff5b076f43e49d1"} Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.422924 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.423789 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.424340 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.426254 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" 
event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"31ac2e1f79f5357becc5e25f15a7486781163702291fb725aa6c258f89467ba2"} Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.427170 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.427678 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.428203 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.429719 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.431955 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.433746 4706 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24" exitCode=0 Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.433780 4706 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad" exitCode=0 Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.433790 4706 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6" exitCode=0 Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.434082 4706 scope.go:117] "RemoveContainer" containerID="3bb7df692675a1f1c289441c8670e4187a2b060a21b236685b8ef3586260ecf3" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.499974 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-98kq2" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.500760 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.501084 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" 
pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.501251 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.528297 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-j5lbl" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.528698 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-j5lbl" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.571217 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-j5lbl" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.572027 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.572623 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.573022 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.573341 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: E1206 05:24:46.746803 4706 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: E1206 05:24:46.747713 4706 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: E1206 05:24:46.748545 4706 controller.go:195] "Failed to update lease" err="Put 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: E1206 05:24:46.748818 4706 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: E1206 05:24:46.749243 4706 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:46 crc kubenswrapper[4706]: I1206 05:24:46.749310 4706 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Dec 06 05:24:46 crc kubenswrapper[4706]: E1206 05:24:46.750016 4706 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="200ms" Dec 06 05:24:46 crc kubenswrapper[4706]: E1206 05:24:46.951012 4706 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="400ms" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.248933 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.249815 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.250351 4706 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.250693 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.251127 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.251422 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.251660 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: E1206 05:24:47.352637 4706 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="800ms" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.370842 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.370941 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.371029 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.371251 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.371278 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.371324 4706 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.371324 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.448341 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.448951 4706 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f" exitCode=0 Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.449163 4706 scope.go:117] "RemoveContainer" containerID="4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.449319 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.469353 4706 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.469517 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.469661 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.469959 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.470160 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.473182 4706 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.473204 4706 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.476366 4706 scope.go:117] "RemoveContainer" containerID="0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.499740 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-j5lbl" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.500477 4706 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.500713 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" 
err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.501037 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.501375 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.501541 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.510970 4706 scope.go:117] "RemoveContainer" containerID="9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.530995 4706 scope.go:117] "RemoveContainer" containerID="0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.565145 4706 scope.go:117] "RemoveContainer" containerID="bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.588313 4706 scope.go:117] "RemoveContainer" containerID="160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.630963 4706 scope.go:117] "RemoveContainer" containerID="4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24" Dec 06 05:24:47 crc kubenswrapper[4706]: E1206 05:24:47.633642 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\": container with ID starting with 4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24 not found: ID does not exist" containerID="4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.633721 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24"} err="failed to get container status \"4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\": rpc error: code = NotFound desc = could not find container \"4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24\": container with ID starting with 4c7c4e278ebc9556545d0cffe28392b6d9a4332a725b10d7f260db2b4b9e0c24 not found: ID does not exist" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.633753 4706 scope.go:117] "RemoveContainer" containerID="0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad" Dec 06 05:24:47 crc kubenswrapper[4706]: E1206 
05:24:47.634043 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\": container with ID starting with 0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad not found: ID does not exist" containerID="0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.634085 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad"} err="failed to get container status \"0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\": rpc error: code = NotFound desc = could not find container \"0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad\": container with ID starting with 0f973afb1db01f54f134c2aa6b480f2bc6aa2bebc7897264ac41cab9b13a67ad not found: ID does not exist" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.634102 4706 scope.go:117] "RemoveContainer" containerID="9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6" Dec 06 05:24:47 crc kubenswrapper[4706]: E1206 05:24:47.634353 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\": container with ID starting with 9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6 not found: ID does not exist" containerID="9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.634378 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6"} err="failed to get container status \"9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\": rpc error: code = NotFound desc = could not find container \"9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6\": container with ID starting with 9eb08e167f59da3636e731d8b3959029b3f7d1f1d18bc4bff34acd6bffc2d7e6 not found: ID does not exist" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.634394 4706 scope.go:117] "RemoveContainer" containerID="0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f" Dec 06 05:24:47 crc kubenswrapper[4706]: E1206 05:24:47.634736 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\": container with ID starting with 0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f not found: ID does not exist" containerID="0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.634779 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f"} err="failed to get container status \"0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\": rpc error: code = NotFound desc = could not find container \"0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f\": container with ID starting with 0b4408a760eea194b55f62089ca54401b78cb3b39e7a7ddf491792aa9989209f not found: ID does not exist" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.634792 4706 
scope.go:117] "RemoveContainer" containerID="bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f" Dec 06 05:24:47 crc kubenswrapper[4706]: E1206 05:24:47.635002 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\": container with ID starting with bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f not found: ID does not exist" containerID="bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.635027 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f"} err="failed to get container status \"bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\": rpc error: code = NotFound desc = could not find container \"bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f\": container with ID starting with bc417ea8b874b68b89de9ed96e103e919d1b6a98e1030d5365e93a16b84f7a0f not found: ID does not exist" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.635083 4706 scope.go:117] "RemoveContainer" containerID="160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44" Dec 06 05:24:47 crc kubenswrapper[4706]: E1206 05:24:47.635343 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\": container with ID starting with 160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44 not found: ID does not exist" containerID="160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.635368 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44"} err="failed to get container status \"160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\": rpc error: code = NotFound desc = could not find container \"160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44\": container with ID starting with 160dff5d5fcf77852ed2c8a507175640212f2dde1b62421b2ee43632799bfa44 not found: ID does not exist" Dec 06 05:24:47 crc kubenswrapper[4706]: E1206 05:24:47.638240 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:24:47Z\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:24:47Z\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:24:47Z\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-12-06T05:24:47Z\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Patch 
\"https://api-int.crc.testing:6443/api/v1/nodes/crc/status?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: E1206 05:24:47.638476 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: E1206 05:24:47.638876 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: E1206 05:24:47.639055 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: E1206 05:24:47.639202 4706 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": Get \"https://api-int.crc.testing:6443/api/v1/nodes/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: E1206 05:24:47.639220 4706 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.707807 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.708327 4706 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.708719 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.709374 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.709635 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.709892 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.876517 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/bd90d5c4-d32b-418b-9cb5-b532c9700699-var-lock\") pod \"bd90d5c4-d32b-418b-9cb5-b532c9700699\" (UID: \"bd90d5c4-d32b-418b-9cb5-b532c9700699\") " Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.876583 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bd90d5c4-d32b-418b-9cb5-b532c9700699-kube-api-access\") pod \"bd90d5c4-d32b-418b-9cb5-b532c9700699\" (UID: \"bd90d5c4-d32b-418b-9cb5-b532c9700699\") " Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.876643 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bd90d5c4-d32b-418b-9cb5-b532c9700699-kubelet-dir\") pod \"bd90d5c4-d32b-418b-9cb5-b532c9700699\" (UID: \"bd90d5c4-d32b-418b-9cb5-b532c9700699\") " Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.876997 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bd90d5c4-d32b-418b-9cb5-b532c9700699-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "bd90d5c4-d32b-418b-9cb5-b532c9700699" (UID: "bd90d5c4-d32b-418b-9cb5-b532c9700699"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.877028 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bd90d5c4-d32b-418b-9cb5-b532c9700699-var-lock" (OuterVolumeSpecName: "var-lock") pod "bd90d5c4-d32b-418b-9cb5-b532c9700699" (UID: "bd90d5c4-d32b-418b-9cb5-b532c9700699"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.889505 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd90d5c4-d32b-418b-9cb5-b532c9700699-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "bd90d5c4-d32b-418b-9cb5-b532c9700699" (UID: "bd90d5c4-d32b-418b-9cb5-b532c9700699"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.978826 4706 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/bd90d5c4-d32b-418b-9cb5-b532c9700699-kubelet-dir\") on node \"crc\" DevicePath \"\"" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.978870 4706 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/bd90d5c4-d32b-418b-9cb5-b532c9700699-var-lock\") on node \"crc\" DevicePath \"\"" Dec 06 05:24:47 crc kubenswrapper[4706]: I1206 05:24:47.978879 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bd90d5c4-d32b-418b-9cb5-b532c9700699-kube-api-access\") on node \"crc\" DevicePath \"\"" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.043733 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.044868 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.045753 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.046003 4706 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.046251 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.046451 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: E1206 05:24:48.153772 4706 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="1.6s" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.195301 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-wx94f" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.195359 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wx94f" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.254952 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wx94f" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.255522 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.256035 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.256474 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.256779 4706 status_manager.go:851] "Failed to get status for pod" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" pod="openshift-marketplace/redhat-marketplace-wx94f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wx94f\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.257293 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.456517 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.456530 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"bd90d5c4-d32b-418b-9cb5-b532c9700699","Type":"ContainerDied","Data":"953c2baf899066d0c00034ef643a6f7df23324db8ca37fb57ecbeee040d53ebd"} Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.457750 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="953c2baf899066d0c00034ef643a6f7df23324db8ca37fb57ecbeee040d53ebd" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.464729 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.465242 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.465687 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.466165 4706 status_manager.go:851] "Failed to get status for pod" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" pod="openshift-marketplace/redhat-marketplace-wx94f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wx94f\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.466775 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.497203 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-wx94f" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.497913 4706 status_manager.go:851] "Failed to get status for pod" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" pod="openshift-marketplace/redhat-marketplace-wx94f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wx94f\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.498760 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 
38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.499079 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.499368 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.499755 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.627259 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dwthx" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.627700 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dwthx" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.703515 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dwthx" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.704359 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.705272 4706 status_manager.go:851] "Failed to get status for pod" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" pod="openshift-marketplace/redhat-marketplace-dwthx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dwthx\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.705791 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.706332 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.706931 4706 status_manager.go:851] "Failed to get status for pod" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" 
pod="openshift-marketplace/redhat-marketplace-wx94f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wx94f\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:48 crc kubenswrapper[4706]: I1206 05:24:48.707290 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.186573 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-j4frb" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.186648 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-j4frb" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.243118 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-j4frb" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.243912 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.244721 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.245455 4706 status_manager.go:851] "Failed to get status for pod" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" pod="openshift-marketplace/redhat-operators-j4frb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-j4frb\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.246029 4706 status_manager.go:851] "Failed to get status for pod" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" pod="openshift-marketplace/redhat-marketplace-wx94f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wx94f\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.246496 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.247235 4706 status_manager.go:851] "Failed to get status for pod" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" pod="openshift-marketplace/redhat-marketplace-dwthx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dwthx\": dial tcp 38.102.83.23:6443: 
connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.247800 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.508759 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dwthx" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.509506 4706 status_manager.go:851] "Failed to get status for pod" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" pod="openshift-marketplace/redhat-marketplace-wx94f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wx94f\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.510284 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.510790 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.511836 4706 status_manager.go:851] "Failed to get status for pod" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" pod="openshift-marketplace/redhat-marketplace-dwthx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dwthx\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.512344 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.513233 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.514531 4706 status_manager.go:851] "Failed to get status for pod" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" pod="openshift-marketplace/redhat-operators-j4frb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-j4frb\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.526697 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/redhat-operators-j4frb" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.527353 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.527544 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.527714 4706 status_manager.go:851] "Failed to get status for pod" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" pod="openshift-marketplace/redhat-marketplace-dwthx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dwthx\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.528059 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.530909 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.531280 4706 status_manager.go:851] "Failed to get status for pod" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" pod="openshift-marketplace/redhat-operators-j4frb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-j4frb\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.531585 4706 status_manager.go:851] "Failed to get status for pod" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" pod="openshift-marketplace/redhat-marketplace-wx94f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wx94f\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.608606 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-r2clj" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.608681 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-r2clj" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.645748 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-r2clj" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.646380 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" 
pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.646671 4706 status_manager.go:851] "Failed to get status for pod" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" pod="openshift-marketplace/redhat-operators-j4frb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-j4frb\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: E1206 05:24:49.646769 4706 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.23:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187e88f3ccfff460 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-06 05:24:44.673569888 +0000 UTC m=+307.001393872,LastTimestamp:2025-12-06 05:24:44.673569888 +0000 UTC m=+307.001393872,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.647220 4706 status_manager.go:851] "Failed to get status for pod" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" pod="openshift-marketplace/redhat-marketplace-wx94f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wx94f\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.647402 4706 status_manager.go:851] "Failed to get status for pod" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" pod="openshift-marketplace/redhat-operators-r2clj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-r2clj\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.647555 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.647715 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.647904 4706 status_manager.go:851] "Failed to get status for pod" 
podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" pod="openshift-marketplace/redhat-marketplace-dwthx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dwthx\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: I1206 05:24:49.648084 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:49 crc kubenswrapper[4706]: E1206 05:24:49.754643 4706 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="3.2s" Dec 06 05:24:50 crc kubenswrapper[4706]: I1206 05:24:50.506779 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-r2clj" Dec 06 05:24:50 crc kubenswrapper[4706]: I1206 05:24:50.507171 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:50 crc kubenswrapper[4706]: I1206 05:24:50.507702 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:50 crc kubenswrapper[4706]: I1206 05:24:50.508610 4706 status_manager.go:851] "Failed to get status for pod" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" pod="openshift-marketplace/redhat-marketplace-dwthx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dwthx\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:50 crc kubenswrapper[4706]: I1206 05:24:50.509028 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:50 crc kubenswrapper[4706]: I1206 05:24:50.509315 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:50 crc kubenswrapper[4706]: I1206 05:24:50.509656 4706 status_manager.go:851] "Failed to get status for pod" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" pod="openshift-marketplace/redhat-operators-j4frb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-j4frb\": dial tcp 38.102.83.23:6443: connect: connection refused" 
Dec 06 05:24:50 crc kubenswrapper[4706]: I1206 05:24:50.509996 4706 status_manager.go:851] "Failed to get status for pod" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" pod="openshift-marketplace/redhat-marketplace-wx94f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wx94f\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:50 crc kubenswrapper[4706]: I1206 05:24:50.510236 4706 status_manager.go:851] "Failed to get status for pod" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" pod="openshift-marketplace/redhat-operators-r2clj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-r2clj\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:52 crc kubenswrapper[4706]: E1206 05:24:52.955973 4706 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="6.4s" Dec 06 05:24:56 crc kubenswrapper[4706]: I1206 05:24:56.256616 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Dec 06 05:24:56 crc kubenswrapper[4706]: I1206 05:24:56.256968 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:56 crc kubenswrapper[4706]: I1206 05:24:56.258259 4706 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:56 crc kubenswrapper[4706]: I1206 05:24:56.258784 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:56 crc kubenswrapper[4706]: I1206 05:24:56.259155 4706 status_manager.go:851] "Failed to get status for pod" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" pod="openshift-marketplace/redhat-marketplace-dwthx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dwthx\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:56 crc kubenswrapper[4706]: I1206 05:24:56.259559 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:56 crc kubenswrapper[4706]: I1206 05:24:56.260149 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:56 crc kubenswrapper[4706]: I1206 05:24:56.260547 4706 status_manager.go:851] "Failed to get status for pod" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" pod="openshift-marketplace/redhat-operators-j4frb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-j4frb\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:56 crc kubenswrapper[4706]: I1206 05:24:56.260825 4706 status_manager.go:851] "Failed to get status for pod" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" pod="openshift-marketplace/redhat-marketplace-wx94f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wx94f\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:56 crc kubenswrapper[4706]: I1206 05:24:56.261152 4706 status_manager.go:851] "Failed to get status for pod" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" pod="openshift-marketplace/redhat-operators-r2clj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-r2clj\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:57 crc kubenswrapper[4706]: I1206 05:24:57.035613 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:24:57 crc kubenswrapper[4706]: I1206 05:24:57.036898 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:57 crc kubenswrapper[4706]: I1206 05:24:57.037245 4706 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:57 crc kubenswrapper[4706]: I1206 05:24:57.037761 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:57 crc kubenswrapper[4706]: I1206 05:24:57.038135 4706 status_manager.go:851] "Failed to get status for pod" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" pod="openshift-marketplace/redhat-marketplace-dwthx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dwthx\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:57 crc kubenswrapper[4706]: I1206 05:24:57.038682 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:57 
crc kubenswrapper[4706]: I1206 05:24:57.038840 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:57 crc kubenswrapper[4706]: I1206 05:24:57.038977 4706 status_manager.go:851] "Failed to get status for pod" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" pod="openshift-marketplace/redhat-operators-j4frb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-j4frb\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:57 crc kubenswrapper[4706]: I1206 05:24:57.039158 4706 status_manager.go:851] "Failed to get status for pod" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" pod="openshift-marketplace/redhat-marketplace-wx94f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wx94f\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:57 crc kubenswrapper[4706]: I1206 05:24:57.039617 4706 status_manager.go:851] "Failed to get status for pod" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" pod="openshift-marketplace/redhat-operators-r2clj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-r2clj\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:57 crc kubenswrapper[4706]: I1206 05:24:57.061161 4706 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="15594eef-1c46-43e2-9910-088593c720de" Dec 06 05:24:57 crc kubenswrapper[4706]: I1206 05:24:57.061201 4706 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="15594eef-1c46-43e2-9910-088593c720de" Dec 06 05:24:57 crc kubenswrapper[4706]: E1206 05:24:57.061749 4706 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:24:57 crc kubenswrapper[4706]: I1206 05:24:57.062804 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:24:57 crc kubenswrapper[4706]: W1206 05:24:57.094588 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-71ce4a0b0482e086fd12c9b1ff9e8ee32adb7068645998fcacc6e231faee0d1d WatchSource:0}: Error finding container 71ce4a0b0482e086fd12c9b1ff9e8ee32adb7068645998fcacc6e231faee0d1d: Status 404 returned error can't find the container with id 71ce4a0b0482e086fd12c9b1ff9e8ee32adb7068645998fcacc6e231faee0d1d Dec 06 05:24:57 crc kubenswrapper[4706]: I1206 05:24:57.508614 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"71ce4a0b0482e086fd12c9b1ff9e8ee32adb7068645998fcacc6e231faee0d1d"} Dec 06 05:24:58 crc kubenswrapper[4706]: I1206 05:24:58.048990 4706 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:58 crc kubenswrapper[4706]: I1206 05:24:58.049584 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:58 crc kubenswrapper[4706]: I1206 05:24:58.050002 4706 status_manager.go:851] "Failed to get status for pod" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" pod="openshift-marketplace/redhat-marketplace-dwthx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dwthx\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:58 crc kubenswrapper[4706]: I1206 05:24:58.053816 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:58 crc kubenswrapper[4706]: I1206 05:24:58.054894 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:58 crc kubenswrapper[4706]: I1206 05:24:58.055487 4706 status_manager.go:851] "Failed to get status for pod" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" pod="openshift-marketplace/redhat-operators-j4frb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-j4frb\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:58 crc kubenswrapper[4706]: I1206 05:24:58.055895 4706 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:58 crc kubenswrapper[4706]: I1206 05:24:58.056630 4706 status_manager.go:851] "Failed to get status for pod" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" pod="openshift-marketplace/redhat-marketplace-wx94f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wx94f\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:58 crc kubenswrapper[4706]: I1206 05:24:58.057134 4706 status_manager.go:851] "Failed to get status for pod" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" pod="openshift-marketplace/redhat-operators-r2clj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-r2clj\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:58 crc kubenswrapper[4706]: I1206 05:24:58.057602 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:24:59 crc kubenswrapper[4706]: E1206 05:24:59.358243 4706 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="7s" Dec 06 05:24:59 crc kubenswrapper[4706]: I1206 05:24:59.584604 4706 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Liveness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Dec 06 05:24:59 crc kubenswrapper[4706]: I1206 05:24:59.584706 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Dec 06 05:24:59 crc kubenswrapper[4706]: E1206 05:24:59.648638 4706 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.23:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.187e88f3ccfff460 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-12-06 05:24:44.673569888 +0000 UTC m=+307.001393872,LastTimestamp:2025-12-06 05:24:44.673569888 +0000 UTC 
m=+307.001393872,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Dec 06 05:25:02 crc kubenswrapper[4706]: I1206 05:25:02.448897 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" podUID="a4df44f2-c01b-47ab-a7df-6b30ea0510a3" containerName="oauth-openshift" containerID="cri-o://a106d244939248bd07f6cb3fa6ddf317bb6b1d38b0e4a6d804c22afae7539ea6" gracePeriod=15 Dec 06 05:25:04 crc kubenswrapper[4706]: I1206 05:25:04.559593 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e38f06ca872382cded24e3ddd7a5a1a8050eb373a044b2f661dee89e00f8eb77"} Dec 06 05:25:04 crc kubenswrapper[4706]: I1206 05:25:04.563032 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Dec 06 05:25:04 crc kubenswrapper[4706]: I1206 05:25:04.563107 4706 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e" exitCode=1 Dec 06 05:25:04 crc kubenswrapper[4706]: I1206 05:25:04.563134 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e"} Dec 06 05:25:04 crc kubenswrapper[4706]: I1206 05:25:04.563605 4706 scope.go:117] "RemoveContainer" containerID="f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e" Dec 06 05:25:04 crc kubenswrapper[4706]: I1206 05:25:04.563946 4706 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:04 crc kubenswrapper[4706]: I1206 05:25:04.564316 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:04 crc kubenswrapper[4706]: I1206 05:25:04.564751 4706 status_manager.go:851] "Failed to get status for pod" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" pod="openshift-marketplace/redhat-marketplace-dwthx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dwthx\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:04 crc kubenswrapper[4706]: I1206 05:25:04.565006 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:04 crc kubenswrapper[4706]: I1206 05:25:04.565322 4706 status_manager.go:851] 
"Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:04 crc kubenswrapper[4706]: I1206 05:25:04.565558 4706 status_manager.go:851] "Failed to get status for pod" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" pod="openshift-marketplace/redhat-operators-j4frb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-j4frb\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:04 crc kubenswrapper[4706]: I1206 05:25:04.565863 4706 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:04 crc kubenswrapper[4706]: I1206 05:25:04.566178 4706 status_manager.go:851] "Failed to get status for pod" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" pod="openshift-marketplace/redhat-marketplace-wx94f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wx94f\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:04 crc kubenswrapper[4706]: I1206 05:25:04.566579 4706 status_manager.go:851] "Failed to get status for pod" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" pod="openshift-marketplace/redhat-operators-r2clj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-r2clj\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:04 crc kubenswrapper[4706]: I1206 05:25:04.566799 4706 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:04 crc kubenswrapper[4706]: I1206 05:25:04.567004 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.494009 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.495097 4706 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.495422 4706 status_manager.go:851] "Failed to get status for pod" podUID="a4df44f2-c01b-47ab-a7df-6b30ea0510a3" pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-lzm5j\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.495915 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.496434 4706 status_manager.go:851] "Failed to get status for pod" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" pod="openshift-marketplace/redhat-marketplace-dwthx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dwthx\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.496701 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.496938 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.497189 4706 status_manager.go:851] "Failed to get status for pod" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" pod="openshift-marketplace/redhat-operators-j4frb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-j4frb\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.497571 4706 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.498092 4706 status_manager.go:851] "Failed to get status for pod" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" pod="openshift-marketplace/redhat-marketplace-wx94f" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wx94f\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.498375 4706 status_manager.go:851] "Failed to get status for pod" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" pod="openshift-marketplace/redhat-operators-r2clj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-r2clj\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.498645 4706 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.498910 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.572312 4706 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="e38f06ca872382cded24e3ddd7a5a1a8050eb373a044b2f661dee89e00f8eb77" exitCode=0 Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.572438 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"e38f06ca872382cded24e3ddd7a5a1a8050eb373a044b2f661dee89e00f8eb77"} Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.572669 4706 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="15594eef-1c46-43e2-9910-088593c720de" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.572706 4706 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="15594eef-1c46-43e2-9910-088593c720de" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.573362 4706 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.573690 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: E1206 05:25:05.573695 4706 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" 
pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.573938 4706 status_manager.go:851] "Failed to get status for pod" podUID="a4df44f2-c01b-47ab-a7df-6b30ea0510a3" pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-lzm5j\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.574234 4706 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.574664 4706 status_manager.go:851] "Failed to get status for pod" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" pod="openshift-marketplace/redhat-marketplace-dwthx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dwthx\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.574878 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.575176 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.575587 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.576138 4706 status_manager.go:851] "Failed to get status for pod" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" pod="openshift-marketplace/redhat-operators-j4frb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-j4frb\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.576521 4706 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.576930 4706 status_manager.go:851] "Failed to get status for pod" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" pod="openshift-marketplace/redhat-marketplace-wx94f" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wx94f\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.577341 4706 status_manager.go:851] "Failed to get status for pod" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" pod="openshift-marketplace/redhat-operators-r2clj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-r2clj\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.577688 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.577791 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2fbd0ef01c428448654fa7e7f6473bbf4b430ab03aebc9e5827a8473f853db5e"} Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.578918 4706 status_manager.go:851] "Failed to get status for pod" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" pod="openshift-marketplace/redhat-operators-j4frb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-j4frb\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.579741 4706 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.580142 4706 status_manager.go:851] "Failed to get status for pod" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" pod="openshift-marketplace/redhat-marketplace-wx94f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wx94f\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.580710 4706 status_manager.go:851] "Failed to get status for pod" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" pod="openshift-marketplace/redhat-operators-r2clj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-r2clj\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.580992 4706 generic.go:334] "Generic (PLEG): container finished" podID="a4df44f2-c01b-47ab-a7df-6b30ea0510a3" containerID="a106d244939248bd07f6cb3fa6ddf317bb6b1d38b0e4a6d804c22afae7539ea6" exitCode=0 Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.581032 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" event={"ID":"a4df44f2-c01b-47ab-a7df-6b30ea0510a3","Type":"ContainerDied","Data":"a106d244939248bd07f6cb3fa6ddf317bb6b1d38b0e4a6d804c22afae7539ea6"} Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.581089 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" 
event={"ID":"a4df44f2-c01b-47ab-a7df-6b30ea0510a3","Type":"ContainerDied","Data":"e8460b4a1cffb24f02ff946aaf9aa0ffc47212e2c95d73d31424b7a4ab840973"} Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.581117 4706 scope.go:117] "RemoveContainer" containerID="a106d244939248bd07f6cb3fa6ddf317bb6b1d38b0e4a6d804c22afae7539ea6" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.581185 4706 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.581265 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.581535 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.581929 4706 status_manager.go:851] "Failed to get status for pod" podUID="a4df44f2-c01b-47ab-a7df-6b30ea0510a3" pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-lzm5j\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.582313 4706 status_manager.go:851] "Failed to get status for pod" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.582661 4706 status_manager.go:851] "Failed to get status for pod" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" pod="openshift-marketplace/redhat-marketplace-dwthx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dwthx\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.583043 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.583453 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.583789 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" 
pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.584339 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.584829 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.585194 4706 status_manager.go:851] "Failed to get status for pod" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" pod="openshift-marketplace/redhat-operators-j4frb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-j4frb\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.585777 4706 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.586322 4706 status_manager.go:851] "Failed to get status for pod" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" pod="openshift-marketplace/redhat-marketplace-wx94f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wx94f\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.586677 4706 status_manager.go:851] "Failed to get status for pod" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" pod="openshift-marketplace/redhat-operators-r2clj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-r2clj\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.587078 4706 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.587446 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.587868 4706 status_manager.go:851] "Failed to get status for pod" 
podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" pod="openshift-network-diagnostics/network-check-target-xd92c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.588292 4706 status_manager.go:851] "Failed to get status for pod" podUID="a4df44f2-c01b-47ab-a7df-6b30ea0510a3" pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-lzm5j\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.588654 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.589007 4706 status_manager.go:851] "Failed to get status for pod" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" pod="openshift-marketplace/redhat-marketplace-dwthx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dwthx\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.607988 4706 scope.go:117] "RemoveContainer" containerID="a106d244939248bd07f6cb3fa6ddf317bb6b1d38b0e4a6d804c22afae7539ea6" Dec 06 05:25:05 crc kubenswrapper[4706]: E1206 05:25:05.609538 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a106d244939248bd07f6cb3fa6ddf317bb6b1d38b0e4a6d804c22afae7539ea6\": container with ID starting with a106d244939248bd07f6cb3fa6ddf317bb6b1d38b0e4a6d804c22afae7539ea6 not found: ID does not exist" containerID="a106d244939248bd07f6cb3fa6ddf317bb6b1d38b0e4a6d804c22afae7539ea6" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.609574 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a106d244939248bd07f6cb3fa6ddf317bb6b1d38b0e4a6d804c22afae7539ea6"} err="failed to get container status \"a106d244939248bd07f6cb3fa6ddf317bb6b1d38b0e4a6d804c22afae7539ea6\": rpc error: code = NotFound desc = could not find container \"a106d244939248bd07f6cb3fa6ddf317bb6b1d38b0e4a6d804c22afae7539ea6\": container with ID starting with a106d244939248bd07f6cb3fa6ddf317bb6b1d38b0e4a6d804c22afae7539ea6 not found: ID does not exist" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.651569 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-router-certs\") pod \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.651628 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-template-error\") pod \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") " Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.651664 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-idp-0-file-data\") pod \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") "
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.651698 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-audit-policies\") pod \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") "
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.652065 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7sdt5\" (UniqueName: \"kubernetes.io/projected/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-kube-api-access-7sdt5\") pod \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") "
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.652103 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-service-ca\") pod \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") "
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.652133 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-ocp-branding-template\") pod \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") "
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.652165 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-template-provider-selection\") pod \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") "
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.652196 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-trusted-ca-bundle\") pod \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") "
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.652231 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-session\") pod \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") "
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.652287 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-serving-cert\") pod \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") "
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.652315 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-audit-dir\") pod \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") "
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.652341 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-cliconfig\") pod \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") "
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.652366 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-template-login\") pod \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\" (UID: \"a4df44f2-c01b-47ab-a7df-6b30ea0510a3\") "
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.652643 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "a4df44f2-c01b-47ab-a7df-6b30ea0510a3" (UID: "a4df44f2-c01b-47ab-a7df-6b30ea0510a3"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.652755 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "a4df44f2-c01b-47ab-a7df-6b30ea0510a3" (UID: "a4df44f2-c01b-47ab-a7df-6b30ea0510a3"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.653105 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "a4df44f2-c01b-47ab-a7df-6b30ea0510a3" (UID: "a4df44f2-c01b-47ab-a7df-6b30ea0510a3"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.653208 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "a4df44f2-c01b-47ab-a7df-6b30ea0510a3" (UID: "a4df44f2-c01b-47ab-a7df-6b30ea0510a3"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.654043 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "a4df44f2-c01b-47ab-a7df-6b30ea0510a3" (UID: "a4df44f2-c01b-47ab-a7df-6b30ea0510a3"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.658734 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "a4df44f2-c01b-47ab-a7df-6b30ea0510a3" (UID: "a4df44f2-c01b-47ab-a7df-6b30ea0510a3"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.659260 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "a4df44f2-c01b-47ab-a7df-6b30ea0510a3" (UID: "a4df44f2-c01b-47ab-a7df-6b30ea0510a3"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.659513 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "a4df44f2-c01b-47ab-a7df-6b30ea0510a3" (UID: "a4df44f2-c01b-47ab-a7df-6b30ea0510a3"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.660702 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "a4df44f2-c01b-47ab-a7df-6b30ea0510a3" (UID: "a4df44f2-c01b-47ab-a7df-6b30ea0510a3"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.661318 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "a4df44f2-c01b-47ab-a7df-6b30ea0510a3" (UID: "a4df44f2-c01b-47ab-a7df-6b30ea0510a3"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.661617 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "a4df44f2-c01b-47ab-a7df-6b30ea0510a3" (UID: "a4df44f2-c01b-47ab-a7df-6b30ea0510a3"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.661853 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "a4df44f2-c01b-47ab-a7df-6b30ea0510a3" (UID: "a4df44f2-c01b-47ab-a7df-6b30ea0510a3"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.662266 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "a4df44f2-c01b-47ab-a7df-6b30ea0510a3" (UID: "a4df44f2-c01b-47ab-a7df-6b30ea0510a3"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.670657 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-kube-api-access-7sdt5" (OuterVolumeSpecName: "kube-api-access-7sdt5") pod "a4df44f2-c01b-47ab-a7df-6b30ea0510a3" (UID: "a4df44f2-c01b-47ab-a7df-6b30ea0510a3"). InnerVolumeSpecName "kube-api-access-7sdt5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.753526 4706 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-audit-policies\") on node \"crc\" DevicePath \"\""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.753593 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7sdt5\" (UniqueName: \"kubernetes.io/projected/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-kube-api-access-7sdt5\") on node \"crc\" DevicePath \"\""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.753610 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.753623 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.753635 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.753646 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.753661 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.753673 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.753687 4706 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-audit-dir\") on node \"crc\" DevicePath \"\""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.753700 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.753713 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.753725 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.753737 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.753749 4706 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a4df44f2-c01b-47ab-a7df-6b30ea0510a3-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\""
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.900758 4706 status_manager.go:851] "Failed to get status for pod" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" pod="openshift-marketplace/redhat-marketplace-wx94f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-wx94f\": dial tcp 38.102.83.23:6443: connect: connection refused"
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.900922 4706 status_manager.go:851] "Failed to get status for pod" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" pod="openshift-marketplace/redhat-operators-r2clj" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-r2clj\": dial tcp 38.102.83.23:6443: connect: connection refused"
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.901076 4706 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.23:6443: connect: connection refused"
Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.901222 4706 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.23:6443: connect: connection refused"
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/pods/network-check-target-xd92c\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.901499 4706 status_manager.go:851] "Failed to get status for pod" podUID="a4df44f2-c01b-47ab-a7df-6b30ea0510a3" pod="openshift-authentication/oauth-openshift-558db77b4-lzm5j" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-authentication/pods/oauth-openshift-558db77b4-lzm5j\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.901637 4706 status_manager.go:851] "Failed to get status for pod" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" pod="openshift-marketplace/certified-operators-j5lbl" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-j5lbl\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.901775 4706 status_manager.go:851] "Failed to get status for pod" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" pod="openshift-marketplace/redhat-marketplace-dwthx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-dwthx\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.901903 4706 status_manager.go:851] "Failed to get status for pod" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.902144 4706 status_manager.go:851] "Failed to get status for pod" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" pod="openshift-marketplace/community-operators-98kq2" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-98kq2\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.902300 4706 status_manager.go:851] "Failed to get status for pod" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" pod="openshift-marketplace/redhat-operators-j4frb" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-j4frb\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:05 crc kubenswrapper[4706]: I1206 05:25:05.902454 4706 status_manager.go:851] "Failed to get status for pod" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.23:6443: connect: connection refused" Dec 06 05:25:06 crc kubenswrapper[4706]: E1206 05:25:06.359441 4706 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.23:6443: connect: connection refused" interval="7s" Dec 06 05:25:06 crc kubenswrapper[4706]: I1206 05:25:06.588441 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"f4261dadf564488b89c349ad96eb2fbbc952fd0fe1466276f32af836042f3bea"} Dec 06 05:25:06 crc kubenswrapper[4706]: I1206 05:25:06.892071 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 06 05:25:06 crc kubenswrapper[4706]: I1206 05:25:06.892257 4706 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Dec 06 05:25:06 crc kubenswrapper[4706]: I1206 05:25:06.892336 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Dec 06 05:25:07 crc kubenswrapper[4706]: I1206 05:25:07.525592 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 06 05:25:07 crc kubenswrapper[4706]: I1206 05:25:07.600646 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"9aa93335e3548f48ec65ddbe5698342e43268cc25a109f0550e246538be199ca"} Dec 06 05:25:07 crc kubenswrapper[4706]: I1206 05:25:07.600701 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"3067b8be782cb7ff8b07ee43affcc5d9843243c5a673907bc1e66b8e2aa4388f"} Dec 06 05:25:08 crc kubenswrapper[4706]: I1206 05:25:08.610572 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"9382abaedf0357eaee400dabe7ed4ae9e7a3573b5eb6e8f93a148b8d5023d561"} Dec 06 05:25:08 crc kubenswrapper[4706]: I1206 05:25:08.610829 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"25ba53a382a4f9e5ae78702c7f0c27c9329010df8ad1d5b2574d6499ead19250"} Dec 06 05:25:08 crc kubenswrapper[4706]: I1206 05:25:08.610921 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:25:08 crc kubenswrapper[4706]: I1206 05:25:08.611170 4706 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="15594eef-1c46-43e2-9910-088593c720de" Dec 06 05:25:08 crc kubenswrapper[4706]: I1206 05:25:08.611206 4706 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="15594eef-1c46-43e2-9910-088593c720de" Dec 06 05:25:12 crc kubenswrapper[4706]: I1206 05:25:12.063785 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:25:12 crc kubenswrapper[4706]: I1206 05:25:12.064237 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:25:12 crc kubenswrapper[4706]: I1206 05:25:12.070855 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:25:13 crc kubenswrapper[4706]: I1206 05:25:13.626489 4706 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:25:14 crc kubenswrapper[4706]: I1206 05:25:14.648586 4706 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="15594eef-1c46-43e2-9910-088593c720de" Dec 06 05:25:14 crc kubenswrapper[4706]: I1206 05:25:14.648624 4706 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="15594eef-1c46-43e2-9910-088593c720de" Dec 06 05:25:14 crc kubenswrapper[4706]: I1206 05:25:14.652936 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:25:14 crc kubenswrapper[4706]: I1206 05:25:14.654928 4706 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="545303e0-6b56-4c5b-b2ef-dad513ce2c2c" Dec 06 05:25:15 crc kubenswrapper[4706]: I1206 05:25:15.653999 4706 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="15594eef-1c46-43e2-9910-088593c720de" Dec 06 05:25:15 crc kubenswrapper[4706]: I1206 05:25:15.654097 4706 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="15594eef-1c46-43e2-9910-088593c720de" Dec 06 05:25:16 crc kubenswrapper[4706]: I1206 05:25:16.892629 4706 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Dec 06 05:25:16 crc kubenswrapper[4706]: I1206 05:25:16.892946 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Dec 06 05:25:17 crc kubenswrapper[4706]: I1206 05:25:17.067734 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Dec 06 05:25:17 crc kubenswrapper[4706]: I1206 05:25:17.068890 4706 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="15594eef-1c46-43e2-9910-088593c720de" Dec 06 05:25:17 crc kubenswrapper[4706]: I1206 05:25:17.068944 4706 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="15594eef-1c46-43e2-9910-088593c720de" Dec 06 05:25:18 crc kubenswrapper[4706]: I1206 05:25:18.096296 4706 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="545303e0-6b56-4c5b-b2ef-dad513ce2c2c" Dec 06 05:25:25 crc kubenswrapper[4706]: I1206 05:25:25.924833 4706 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-cluster-samples-operator"/"samples-operator-tls" Dec 06 05:25:26 crc kubenswrapper[4706]: I1206 05:25:26.136150 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Dec 06 05:25:26 crc kubenswrapper[4706]: I1206 05:25:26.242858 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Dec 06 05:25:26 crc kubenswrapper[4706]: I1206 05:25:26.423036 4706 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Dec 06 05:25:26 crc kubenswrapper[4706]: I1206 05:25:26.893722 4706 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Dec 06 05:25:26 crc kubenswrapper[4706]: I1206 05:25:26.893780 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Dec 06 05:25:26 crc kubenswrapper[4706]: I1206 05:25:26.893887 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 06 05:25:26 crc kubenswrapper[4706]: I1206 05:25:26.894515 4706 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"2fbd0ef01c428448654fa7e7f6473bbf4b430ab03aebc9e5827a8473f853db5e"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted" Dec 06 05:25:26 crc kubenswrapper[4706]: I1206 05:25:26.894615 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://2fbd0ef01c428448654fa7e7f6473bbf4b430ab03aebc9e5827a8473f853db5e" gracePeriod=30 Dec 06 05:25:26 crc kubenswrapper[4706]: I1206 05:25:26.967194 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Dec 06 05:25:26 crc kubenswrapper[4706]: I1206 05:25:26.984512 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Dec 06 05:25:27 crc kubenswrapper[4706]: I1206 05:25:27.189252 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Dec 06 05:25:27 crc kubenswrapper[4706]: I1206 05:25:27.512284 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Dec 06 05:25:27 crc kubenswrapper[4706]: I1206 05:25:27.719804 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Dec 06 05:25:28 crc kubenswrapper[4706]: I1206 05:25:28.050357 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Dec 06 05:25:28 crc kubenswrapper[4706]: 
Dec 06 05:25:28 crc kubenswrapper[4706]: I1206 05:25:28.143437 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Dec 06 05:25:28 crc kubenswrapper[4706]: I1206 05:25:28.257628 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Dec 06 05:25:28 crc kubenswrapper[4706]: I1206 05:25:28.534877 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt"
Dec 06 05:25:28 crc kubenswrapper[4706]: I1206 05:25:28.738551 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Dec 06 05:25:28 crc kubenswrapper[4706]: I1206 05:25:28.773664 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Dec 06 05:25:29 crc kubenswrapper[4706]: I1206 05:25:29.022600 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Dec 06 05:25:29 crc kubenswrapper[4706]: I1206 05:25:29.081097 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls"
Dec 06 05:25:29 crc kubenswrapper[4706]: I1206 05:25:29.458955 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt"
Dec 06 05:25:30 crc kubenswrapper[4706]: I1206 05:25:30.132003 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config"
Dec 06 05:25:30 crc kubenswrapper[4706]: I1206 05:25:30.135180 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Dec 06 05:25:30 crc kubenswrapper[4706]: I1206 05:25:30.694517 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Dec 06 05:25:31 crc kubenswrapper[4706]: I1206 05:25:31.385942 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Dec 06 05:25:31 crc kubenswrapper[4706]: I1206 05:25:31.646699 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Dec 06 05:25:32 crc kubenswrapper[4706]: I1206 05:25:32.222282 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Dec 06 05:25:33 crc kubenswrapper[4706]: I1206 05:25:33.136781 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Dec 06 05:25:33 crc kubenswrapper[4706]: I1206 05:25:33.305475 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk"
Dec 06 05:25:33 crc kubenswrapper[4706]: I1206 05:25:33.322146 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Dec 06 05:25:33 crc kubenswrapper[4706]: I1206 05:25:33.377633 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Dec 06 05:25:33 crc kubenswrapper[4706]: I1206 05:25:33.641663 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Dec 06 05:25:33 crc kubenswrapper[4706]: I1206 05:25:33.791121 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Dec 06 05:25:33 crc kubenswrapper[4706]: I1206 05:25:33.852984 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config"
Dec 06 05:25:33 crc kubenswrapper[4706]: I1206 05:25:33.952719 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Dec 06 05:25:34 crc kubenswrapper[4706]: I1206 05:25:34.102446 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Dec 06 05:25:34 crc kubenswrapper[4706]: I1206 05:25:34.338975 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Dec 06 05:25:34 crc kubenswrapper[4706]: I1206 05:25:34.544437 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Dec 06 05:25:35 crc kubenswrapper[4706]: I1206 05:25:35.045268 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Dec 06 05:25:35 crc kubenswrapper[4706]: I1206 05:25:35.443554 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert"
Dec 06 05:25:36 crc kubenswrapper[4706]: I1206 05:25:36.031007 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Dec 06 05:25:37 crc kubenswrapper[4706]: I1206 05:25:37.497303 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Dec 06 05:25:37 crc kubenswrapper[4706]: I1206 05:25:37.803827 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Dec 06 05:25:37 crc kubenswrapper[4706]: I1206 05:25:37.991989 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Dec 06 05:25:40 crc kubenswrapper[4706]: I1206 05:25:40.862153 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Dec 06 05:25:41 crc kubenswrapper[4706]: I1206 05:25:41.404170 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Dec 06 05:25:44 crc kubenswrapper[4706]: I1206 05:25:44.002307 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Dec 06 05:25:44 crc kubenswrapper[4706]: I1206 05:25:44.268667 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Dec 06 05:25:44 crc kubenswrapper[4706]: I1206 05:25:44.303135 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Dec 06 05:25:44 crc kubenswrapper[4706]: I1206 05:25:44.652002 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Dec 06 05:25:45 crc kubenswrapper[4706]: I1206 05:25:45.038976 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
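The "Caches populated" flood that dominates this stretch is client-go reflectors completing their initial LIST for each watched object now that the apiserver is reachable again. From the consumer side, this is the standard informer start-and-sync sequence, roughly as below; the kubeconfig path and resync period are assumptions for the sketch:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/var/lib/kubelet/kubeconfig") // assumed path
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	factory := informers.NewSharedInformerFactory(cs, 10*time.Minute)
	cmInformer := factory.Core().V1().ConfigMaps().Informer()
	secretInformer := factory.Core().V1().Secrets().Informer()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop) // each informer's reflector begins its LIST+WATCH

	// Blocks until the initial LIST has landed in the local store — the
	// moment a reflector would report "Caches populated" in the log above.
	if !cache.WaitForCacheSync(stop, cmInformer.HasSynced, secretInformer.HasSynced) {
		panic("caches never synced")
	}
	fmt.Println("caches populated")
}
```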
Dec 06 05:25:45 crc kubenswrapper[4706]: I1206 05:25:45.117237 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Dec 06 05:25:45 crc kubenswrapper[4706]: I1206 05:25:45.461528 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle"
Dec 06 05:25:45 crc kubenswrapper[4706]: I1206 05:25:45.484912 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Dec 06 05:25:46 crc kubenswrapper[4706]: I1206 05:25:46.017597 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Dec 06 05:25:46 crc kubenswrapper[4706]: I1206 05:25:46.681687 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Dec 06 05:25:47 crc kubenswrapper[4706]: I1206 05:25:47.658605 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Dec 06 05:25:48 crc kubenswrapper[4706]: I1206 05:25:48.057502 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert"
Dec 06 05:25:48 crc kubenswrapper[4706]: I1206 05:25:48.385355 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Dec 06 05:25:48 crc kubenswrapper[4706]: I1206 05:25:48.714334 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Dec 06 05:25:49 crc kubenswrapper[4706]: I1206 05:25:49.225428 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Dec 06 05:25:49 crc kubenswrapper[4706]: I1206 05:25:49.341667 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates"
Dec 06 05:25:49 crc kubenswrapper[4706]: I1206 05:25:49.373980 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Dec 06 05:25:50 crc kubenswrapper[4706]: I1206 05:25:50.028966 4706 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Dec 06 05:25:50 crc kubenswrapper[4706]: I1206 05:25:50.085778 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Dec 06 05:25:50 crc kubenswrapper[4706]: I1206 05:25:50.286019 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Dec 06 05:25:50 crc kubenswrapper[4706]: I1206 05:25:50.411551 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Dec 06 05:25:50 crc kubenswrapper[4706]: I1206 05:25:50.610677 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Dec 06 05:25:50 crc kubenswrapper[4706]: I1206 05:25:50.867106 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Dec 06 05:25:50 crc kubenswrapper[4706]: I1206 05:25:50.877734 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Dec 06 05:25:50 crc kubenswrapper[4706]: I1206 05:25:50.951683 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist"
Dec 06 05:25:51 crc kubenswrapper[4706]: I1206 05:25:51.038476 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Dec 06 05:25:51 crc kubenswrapper[4706]: I1206 05:25:51.113211 4706 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Dec 06 05:25:51 crc kubenswrapper[4706]: I1206 05:25:51.184086 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Dec 06 05:25:51 crc kubenswrapper[4706]: I1206 05:25:51.194783 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Dec 06 05:25:51 crc kubenswrapper[4706]: I1206 05:25:51.400338 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Dec 06 05:25:51 crc kubenswrapper[4706]: I1206 05:25:51.405327 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Dec 06 05:25:51 crc kubenswrapper[4706]: I1206 05:25:51.549724 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Dec 06 05:25:51 crc kubenswrapper[4706]: I1206 05:25:51.587669 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Dec 06 05:25:52 crc kubenswrapper[4706]: I1206 05:25:52.162093 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Dec 06 05:25:52 crc kubenswrapper[4706]: I1206 05:25:52.431206 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Dec 06 05:25:52 crc kubenswrapper[4706]: I1206 05:25:52.511728 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Dec 06 05:25:52 crc kubenswrapper[4706]: I1206 05:25:52.603543 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Dec 06 05:25:53 crc kubenswrapper[4706]: I1206 05:25:53.114913 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Dec 06 05:25:53 crc kubenswrapper[4706]: I1206 05:25:53.965454 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Dec 06 05:25:53 crc kubenswrapper[4706]: I1206 05:25:53.971988 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Dec 06 05:25:54 crc kubenswrapper[4706]: I1206 05:25:54.060249 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Dec 06 05:25:54 crc kubenswrapper[4706]: I1206 05:25:54.076002 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Dec 06 05:25:54 crc kubenswrapper[4706]: I1206 05:25:54.083567 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
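Each of these cache lines names a single object (object-"openshift-service-ca"/"signing-key" and so on) rather than a whole collection: the kubelet watches only the Secrets and ConfigMaps that running pods actually reference, which corresponds to a LIST+WATCH scoped by field selector to one name. A sketch of that per-object scoping, under the assumption of in-cluster credentials; the concrete namespace and name are taken from the entries nearby:

```go
package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes in-cluster credentials
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// LIST+WATCH restricted to exactly one Secret, mirroring the
	// object-"openshift-service-ca"/"signing-key" cache entry below.
	lw := cache.NewListWatchFromClient(
		cs.CoreV1().RESTClient(), "secrets", "openshift-service-ca",
		fields.OneTermEqualSelector("metadata.name", "signing-key"))

	_, controller := cache.NewInformer(lw, &v1.Secret{}, 30*time.Minute,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) { fmt.Println("cache populated for secret") },
		})
	stop := make(chan struct{})
	defer close(stop)
	go controller.Run(stop)
	time.Sleep(time.Minute) // let the initial LIST and watch run for a while
}
```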
Dec 06 05:25:54 crc kubenswrapper[4706]: I1206 05:25:54.258771 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Dec 06 05:25:54 crc kubenswrapper[4706]: I1206 05:25:54.523789 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Dec 06 05:25:54 crc kubenswrapper[4706]: I1206 05:25:54.775740 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Dec 06 05:25:54 crc kubenswrapper[4706]: I1206 05:25:54.975240 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Dec 06 05:25:55 crc kubenswrapper[4706]: I1206 05:25:55.138015 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Dec 06 05:25:55 crc kubenswrapper[4706]: I1206 05:25:55.200180 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Dec 06 05:25:55 crc kubenswrapper[4706]: I1206 05:25:55.329720 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Dec 06 05:25:55 crc kubenswrapper[4706]: I1206 05:25:55.523391 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Dec 06 05:25:55 crc kubenswrapper[4706]: I1206 05:25:55.524186 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Dec 06 05:25:55 crc kubenswrapper[4706]: I1206 05:25:55.752970 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Dec 06 05:25:55 crc kubenswrapper[4706]: I1206 05:25:55.804031 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Dec 06 05:25:55 crc kubenswrapper[4706]: I1206 05:25:55.811826 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Dec 06 05:25:55 crc kubenswrapper[4706]: I1206 05:25:55.864764 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Dec 06 05:25:55 crc kubenswrapper[4706]: I1206 05:25:55.871981 4706 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Dec 06 05:25:56 crc kubenswrapper[4706]: I1206 05:25:56.310014 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Dec 06 05:25:56 crc kubenswrapper[4706]: I1206 05:25:56.781867 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Dec 06 05:25:56 crc kubenswrapper[4706]: I1206 05:25:56.869205 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Dec 06 05:25:57 crc kubenswrapper[4706]: I1206 05:25:57.095550 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Dec 06 05:25:57 crc kubenswrapper[4706]: I1206 05:25:57.189086 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm"
Dec 06 05:25:57 crc kubenswrapper[4706]: I1206 05:25:57.226097 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Dec 06 05:25:57 crc kubenswrapper[4706]: I1206 05:25:57.496313 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Dec 06 05:25:57 crc kubenswrapper[4706]: I1206 05:25:57.583571 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.059231 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.107437 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.358689 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.396193 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.447241 4706 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.452555 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=74.452530032 podStartE2EDuration="1m14.452530032s" podCreationTimestamp="2025-12-06 05:24:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:25:13.150589638 +0000 UTC m=+335.478413602" watchObservedRunningTime="2025-12-06 05:25:58.452530032 +0000 UTC m=+380.780354006"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.455471 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-authentication/oauth-openshift-558db77b4-lzm5j"]
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.455551 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-575cc5b957-jtdvz","openshift-kube-apiserver/kube-apiserver-crc"]
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.455953 4706 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="15594eef-1c46-43e2-9910-088593c720de"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.455993 4706 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="15594eef-1c46-43e2-9910-088593c720de"
Dec 06 05:25:58 crc kubenswrapper[4706]: E1206 05:25:58.455973 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" containerName="installer"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.456126 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" containerName="installer"
Dec 06 05:25:58 crc kubenswrapper[4706]: E1206 05:25:58.456151 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4df44f2-c01b-47ab-a7df-6b30ea0510a3" containerName="oauth-openshift"
05:25:58.456165 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4df44f2-c01b-47ab-a7df-6b30ea0510a3" containerName="oauth-openshift" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.456668 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4df44f2-c01b-47ab-a7df-6b30ea0510a3" containerName="oauth-openshift" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.456709 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd90d5c4-d32b-418b-9cb5-b532c9700699" containerName="installer" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.457467 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.459494 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.459567 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.460422 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.460883 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.461894 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.462253 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.462605 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.464538 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.464936 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.465332 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.465198 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.465536 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.475495 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.489736 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=45.489713875 podStartE2EDuration="45.489713875s" podCreationTimestamp="2025-12-06 05:25:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 
+0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:25:58.482691657 +0000 UTC m=+380.810515631" watchObservedRunningTime="2025-12-06 05:25:58.489713875 +0000 UTC m=+380.817537839" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.491404 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.491581 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.531442 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-user-template-error\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.532074 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qg69n\" (UniqueName: \"kubernetes.io/projected/22853a63-db92-4c54-91dd-245763bfa04b-kube-api-access-qg69n\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.532204 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-serving-cert\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.532312 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/22853a63-db92-4c54-91dd-245763bfa04b-audit-policies\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.532408 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-user-template-login\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.532503 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.532600 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.532743 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-service-ca\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.532855 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-session\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.532943 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-cliconfig\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.533021 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-router-certs\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.533141 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.533260 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/22853a63-db92-4c54-91dd-245763bfa04b-audit-dir\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.533376 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc 
kubenswrapper[4706]: I1206 05:25:58.634208 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.634576 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-user-template-error\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.634682 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qg69n\" (UniqueName: \"kubernetes.io/projected/22853a63-db92-4c54-91dd-245763bfa04b-kube-api-access-qg69n\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.634776 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-serving-cert\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.634893 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/22853a63-db92-4c54-91dd-245763bfa04b-audit-policies\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.634992 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-user-template-login\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.635134 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.635235 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc 
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.635341 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-service-ca\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.635444 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-session\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.635545 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-cliconfig\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.635630 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-router-certs\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.635740 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.635855 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/22853a63-db92-4c54-91dd-245763bfa04b-audit-dir\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.636004 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/22853a63-db92-4c54-91dd-245763bfa04b-audit-dir\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.636488 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-service-ca\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.637571 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-cliconfig\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.637952 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.639637 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/22853a63-db92-4c54-91dd-245763bfa04b-audit-policies\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.640133 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-router-certs\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.640369 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.640566 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-serving-cert\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.640596 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-session\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.642562 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-user-template-login\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.642972 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.643388 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.644399 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/22853a63-db92-4c54-91dd-245763bfa04b-v4-0-config-user-template-error\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.658160 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qg69n\" (UniqueName: \"kubernetes.io/projected/22853a63-db92-4c54-91dd-245763bfa04b-kube-api-access-qg69n\") pod \"oauth-openshift-575cc5b957-jtdvz\" (UID: \"22853a63-db92-4c54-91dd-245763bfa04b\") " pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.715329 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.807655 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Need to start a new one" pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.926428 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.936692 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.938525 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.938582 4706 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="2fbd0ef01c428448654fa7e7f6473bbf4b430ab03aebc9e5827a8473f853db5e" exitCode=137 Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.938685 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"2fbd0ef01c428448654fa7e7f6473bbf4b430ab03aebc9e5827a8473f853db5e"} Dec 06 05:25:58 crc kubenswrapper[4706]: I1206 05:25:58.938754 4706 scope.go:117] "RemoveContainer" containerID="f9f8bcb9ee42496e2e19d3a3e5f375dc0131b97cb491d26d079a662e32f3e69e" Dec 06 05:25:59 crc kubenswrapper[4706]: I1206 05:25:59.081235 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Dec 06 05:25:59 crc kubenswrapper[4706]: I1206 05:25:59.610673 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 06 05:25:59 crc kubenswrapper[4706]: I1206 05:25:59.950224 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Dec 06 05:25:59 crc kubenswrapper[4706]: I1206 05:25:59.951699 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"fcef7308ac9494c78d4f152e0888fdbfcd952c613b35b52b5f89a61a09da6532"} Dec 06 05:25:59 crc kubenswrapper[4706]: I1206 05:25:59.977492 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Dec 06 05:26:00 crc kubenswrapper[4706]: I1206 05:26:00.030476 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Dec 06 05:26:00 crc kubenswrapper[4706]: I1206 05:26:00.042101 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4df44f2-c01b-47ab-a7df-6b30ea0510a3" path="/var/lib/kubelet/pods/a4df44f2-c01b-47ab-a7df-6b30ea0510a3/volumes" Dec 06 05:26:00 crc kubenswrapper[4706]: I1206 05:26:00.522155 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Dec 06 05:26:00 crc kubenswrapper[4706]: I1206 05:26:00.599349 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Dec 06 05:26:00 crc kubenswrapper[4706]: I1206 
05:26:00.652941 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Dec 06 05:26:00 crc kubenswrapper[4706]: I1206 05:26:00.676120 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Dec 06 05:26:00 crc kubenswrapper[4706]: I1206 05:26:00.770547 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Dec 06 05:26:00 crc kubenswrapper[4706]: I1206 05:26:00.773077 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Dec 06 05:26:00 crc kubenswrapper[4706]: I1206 05:26:00.892133 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Dec 06 05:26:00 crc kubenswrapper[4706]: I1206 05:26:00.943399 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Dec 06 05:26:01 crc kubenswrapper[4706]: I1206 05:26:01.034846 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Dec 06 05:26:01 crc kubenswrapper[4706]: I1206 05:26:01.085773 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Dec 06 05:26:01 crc kubenswrapper[4706]: I1206 05:26:01.115535 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Dec 06 05:26:01 crc kubenswrapper[4706]: I1206 05:26:01.250378 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Dec 06 05:26:01 crc kubenswrapper[4706]: I1206 05:26:01.348460 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 06 05:26:01 crc kubenswrapper[4706]: I1206 05:26:01.435141 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Dec 06 05:26:01 crc kubenswrapper[4706]: I1206 05:26:01.531618 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Dec 06 05:26:01 crc kubenswrapper[4706]: I1206 05:26:01.660497 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Dec 06 05:26:01 crc kubenswrapper[4706]: I1206 05:26:01.671240 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Dec 06 05:26:01 crc kubenswrapper[4706]: I1206 05:26:01.713482 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Dec 06 05:26:01 crc kubenswrapper[4706]: I1206 05:26:01.742199 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Dec 06 05:26:01 crc kubenswrapper[4706]: I1206 05:26:01.746036 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Dec 06 05:26:01 crc kubenswrapper[4706]: I1206 05:26:01.934891 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Dec 06 05:26:01 crc kubenswrapper[4706]: I1206 
Dec 06 05:26:01 crc kubenswrapper[4706]: E1206 05:26:01.980401 4706 log.go:32] "RunPodSandbox from runtime service failed" err=<
Dec 06 05:26:01 crc kubenswrapper[4706]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-575cc5b957-jtdvz_openshift-authentication_22853a63-db92-4c54-91dd-245763bfa04b_0(0bac171453664c0217fc348806c74899e7b2e043f9bb95d889e0d33fa08ebc81): error adding pod openshift-authentication_oauth-openshift-575cc5b957-jtdvz to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"0bac171453664c0217fc348806c74899e7b2e043f9bb95d889e0d33fa08ebc81" Netns:"/var/run/netns/56b43957-55ea-4381-9537-85bc2fc01448" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-575cc5b957-jtdvz;K8S_POD_INFRA_CONTAINER_ID=0bac171453664c0217fc348806c74899e7b2e043f9bb95d889e0d33fa08ebc81;K8S_POD_UID=22853a63-db92-4c54-91dd-245763bfa04b" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-575cc5b957-jtdvz] networking: Multus: [openshift-authentication/oauth-openshift-575cc5b957-jtdvz/22853a63-db92-4c54-91dd-245763bfa04b]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-575cc5b957-jtdvz in out of cluster comm: pod "oauth-openshift-575cc5b957-jtdvz" not found
Dec 06 05:26:01 crc kubenswrapper[4706]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"}
Dec 06 05:26:01 crc kubenswrapper[4706]: >
Dec 06 05:26:01 crc kubenswrapper[4706]: E1206 05:26:01.980485 4706 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=<
Dec 06 05:26:01 crc kubenswrapper[4706]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-575cc5b957-jtdvz_openshift-authentication_22853a63-db92-4c54-91dd-245763bfa04b_0(0bac171453664c0217fc348806c74899e7b2e043f9bb95d889e0d33fa08ebc81): error adding pod openshift-authentication_oauth-openshift-575cc5b957-jtdvz to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"0bac171453664c0217fc348806c74899e7b2e043f9bb95d889e0d33fa08ebc81" Netns:"/var/run/netns/56b43957-55ea-4381-9537-85bc2fc01448" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-575cc5b957-jtdvz;K8S_POD_INFRA_CONTAINER_ID=0bac171453664c0217fc348806c74899e7b2e043f9bb95d889e0d33fa08ebc81;K8S_POD_UID=22853a63-db92-4c54-91dd-245763bfa04b" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-575cc5b957-jtdvz] networking: Multus: [openshift-authentication/oauth-openshift-575cc5b957-jtdvz/22853a63-db92-4c54-91dd-245763bfa04b]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-575cc5b957-jtdvz in out of cluster comm: pod "oauth-openshift-575cc5b957-jtdvz" not found
Dec 06 05:26:01 crc kubenswrapper[4706]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"}
Dec 06 05:26:01 crc kubenswrapper[4706]: > pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:26:01 crc kubenswrapper[4706]: E1206 05:26:01.980513 4706 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err=<
Dec 06 05:26:01 crc kubenswrapper[4706]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-575cc5b957-jtdvz_openshift-authentication_22853a63-db92-4c54-91dd-245763bfa04b_0(0bac171453664c0217fc348806c74899e7b2e043f9bb95d889e0d33fa08ebc81): error adding pod openshift-authentication_oauth-openshift-575cc5b957-jtdvz to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"0bac171453664c0217fc348806c74899e7b2e043f9bb95d889e0d33fa08ebc81" Netns:"/var/run/netns/56b43957-55ea-4381-9537-85bc2fc01448" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-575cc5b957-jtdvz;K8S_POD_INFRA_CONTAINER_ID=0bac171453664c0217fc348806c74899e7b2e043f9bb95d889e0d33fa08ebc81;K8S_POD_UID=22853a63-db92-4c54-91dd-245763bfa04b" Path:"" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-575cc5b957-jtdvz] networking: Multus: [openshift-authentication/oauth-openshift-575cc5b957-jtdvz/22853a63-db92-4c54-91dd-245763bfa04b]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-575cc5b957-jtdvz in out of cluster comm: pod "oauth-openshift-575cc5b957-jtdvz" not found
Dec 06 05:26:01 crc kubenswrapper[4706]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"}
Dec 06 05:26:01 crc kubenswrapper[4706]: > pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz"
Dec 06 05:26:01 crc kubenswrapper[4706]: E1206 05:26:01.980591 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"oauth-openshift-575cc5b957-jtdvz_openshift-authentication(22853a63-db92-4c54-91dd-245763bfa04b)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"oauth-openshift-575cc5b957-jtdvz_openshift-authentication(22853a63-db92-4c54-91dd-245763bfa04b)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_oauth-openshift-575cc5b957-jtdvz_openshift-authentication_22853a63-db92-4c54-91dd-245763bfa04b_0(0bac171453664c0217fc348806c74899e7b2e043f9bb95d889e0d33fa08ebc81): error adding pod openshift-authentication_oauth-openshift-575cc5b957-jtdvz to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"0bac171453664c0217fc348806c74899e7b2e043f9bb95d889e0d33fa08ebc81\\\" Netns:\\\"/var/run/netns/56b43957-55ea-4381-9537-85bc2fc01448\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-authentication;K8S_POD_NAME=oauth-openshift-575cc5b957-jtdvz;K8S_POD_INFRA_CONTAINER_ID=0bac171453664c0217fc348806c74899e7b2e043f9bb95d889e0d33fa08ebc81;K8S_POD_UID=22853a63-db92-4c54-91dd-245763bfa04b\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-authentication/oauth-openshift-575cc5b957-jtdvz] networking: Multus: [openshift-authentication/oauth-openshift-575cc5b957-jtdvz/22853a63-db92-4c54-91dd-245763bfa04b]: error setting the networks status, pod was already deleted: SetPodNetworkStatusAnnotation: failed to query the pod oauth-openshift-575cc5b957-jtdvz in out of cluster comm: pod \\\"oauth-openshift-575cc5b957-jtdvz\\\" not found\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" podUID="22853a63-db92-4c54-91dd-245763bfa04b"
Dec 06 05:26:02 crc kubenswrapper[4706]: I1206 05:26:02.114004 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle"
Dec 06 05:26:02 crc kubenswrapper[4706]: I1206 05:26:02.202546 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Dec 06 05:26:02 crc kubenswrapper[4706]: I1206 05:26:02.362418 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Dec 06 05:26:02 crc kubenswrapper[4706]: I1206 05:26:02.500934 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Dec 06 05:26:02 crc kubenswrapper[4706]: I1206 05:26:02.653421 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Dec 06 05:26:02 crc kubenswrapper[4706]: I1206 05:26:02.843853 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6"
Dec 06 05:26:03 crc kubenswrapper[4706]: I1206 05:26:03.006783 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Dec 06 05:26:03 crc kubenswrapper[4706]: I1206 05:26:03.108432 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Dec 06 05:26:03 crc kubenswrapper[4706]: I1206 05:26:03.250575 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Dec 06 05:26:03 crc kubenswrapper[4706]: I1206 05:26:03.462849 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1"
Dec 06 05:26:03 crc kubenswrapper[4706]: I1206 05:26:03.463802 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca"
Dec 06 05:26:03 crc kubenswrapper[4706]: I1206 05:26:03.524455 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Dec 06 05:26:03 crc kubenswrapper[4706]: I1206 05:26:03.570855 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Dec 06 05:26:03 crc kubenswrapper[4706]: I1206 05:26:03.688613 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Dec 06 05:26:03 crc kubenswrapper[4706]: I1206 05:26:03.784762 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Dec 06 05:26:03 crc kubenswrapper[4706]: I1206 05:26:03.829534 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt"
Dec 06 05:26:03 crc kubenswrapper[4706]: I1206 05:26:03.915877 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Dec 06 05:26:03 crc kubenswrapper[4706]: I1206 05:26:03.961486 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt"
Dec 06 05:26:03 crc kubenswrapper[4706]: I1206 05:26:03.994800 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb"
Dec 06 05:26:04 crc kubenswrapper[4706]: I1206 05:26:04.190294 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Dec 06 05:26:04 crc kubenswrapper[4706]: I1206 05:26:04.196538 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Dec 06 05:26:04 crc kubenswrapper[4706]: I1206 05:26:04.225738 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Dec 06 05:26:04 crc kubenswrapper[4706]: I1206 05:26:04.243518 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Dec 06 05:26:04 crc kubenswrapper[4706]: I1206 05:26:04.914604 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Dec 06 05:26:05 crc kubenswrapper[4706]: I1206 05:26:05.226856 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Dec 06 05:26:05 crc kubenswrapper[4706]: I1206 05:26:05.400819 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Dec 06 05:26:05 crc kubenswrapper[4706]: I1206 05:26:05.438477 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Dec 06 05:26:05 crc kubenswrapper[4706]: I1206 05:26:05.550750 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Dec 06 05:26:05 crc kubenswrapper[4706]: I1206 05:26:05.609727 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Dec 06 05:26:05 crc kubenswrapper[4706]: I1206 05:26:05.623620 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Dec 06 05:26:05 crc kubenswrapper[4706]: I1206 05:26:05.657499 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Dec 06 05:26:05 crc kubenswrapper[4706]: I1206 05:26:05.777143 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Dec 06 05:26:05 crc kubenswrapper[4706]: I1206 05:26:05.786124 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl"
Dec 06 05:26:05 crc kubenswrapper[4706]: I1206 05:26:05.795766 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Dec 06 05:26:05 crc kubenswrapper[4706]: I1206 05:26:05.961265 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 06 05:26:05 crc kubenswrapper[4706]: I1206 05:26:05.961414 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 06 05:26:06 crc kubenswrapper[4706]: I1206 05:26:06.234140 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt"
Dec 06 05:26:06 crc kubenswrapper[4706]: I1206 05:26:06.318591 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Dec 06 05:26:06 crc kubenswrapper[4706]: I1206 05:26:06.369958 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Dec 06 05:26:06 crc kubenswrapper[4706]: I1206 05:26:06.533727 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Dec 06 05:26:06 crc kubenswrapper[4706]: I1206 05:26:06.592645 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Dec 06 05:26:06 crc kubenswrapper[4706]: I1206 05:26:06.595006 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Dec 06 05:26:06 crc kubenswrapper[4706]: I1206 05:26:06.628181 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Dec 06 05:26:06 crc kubenswrapper[4706]: I1206 05:26:06.697202 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Dec 06 05:26:06 crc kubenswrapper[4706]: I1206 05:26:06.773787 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Dec 06 05:26:06 crc kubenswrapper[4706]: I1206 05:26:06.891650 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 06 05:26:06 crc kubenswrapper[4706]: I1206 05:26:06.895655 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 06 05:26:06 crc kubenswrapper[4706]: I1206 05:26:06.993706 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Dec 06 05:26:07 crc kubenswrapper[4706]: I1206 05:26:07.268522 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Dec 06 05:26:07 crc kubenswrapper[4706]: I1206 05:26:07.293988 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Dec 06 05:26:07 crc kubenswrapper[4706]: I1206 05:26:07.507486 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Dec 06 05:26:07 crc kubenswrapper[4706]: I1206 05:26:07.564621 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Dec 06 05:26:07 crc kubenswrapper[4706]: I1206 05:26:07.658475 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Dec 06 05:26:07 crc kubenswrapper[4706]: I1206 05:26:07.853849 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Dec 06 05:26:07 crc kubenswrapper[4706]: I1206 05:26:07.984502 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Dec 06 05:26:08 crc kubenswrapper[4706]: I1206 05:26:08.233383 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Dec 06 05:26:08 crc kubenswrapper[4706]: I1206 05:26:08.235021 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Dec 06 05:26:08 crc kubenswrapper[4706]: I1206 05:26:08.501414 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Dec 06 05:26:08 crc kubenswrapper[4706]: I1206 05:26:08.515613 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Dec 06 05:26:08 crc kubenswrapper[4706]: I1206 05:26:08.515791 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Dec 06 05:26:08 crc kubenswrapper[4706]: I1206 05:26:08.623500 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Dec 06 05:26:08 crc kubenswrapper[4706]: I1206 05:26:08.717548 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt"
Dec 06 05:26:08 crc kubenswrapper[4706]: I1206 05:26:08.718933 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Dec 06 05:26:08 crc kubenswrapper[4706]: I1206 05:26:08.759810 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Dec 06 05:26:09 crc kubenswrapper[4706]: I1206 05:26:09.077874 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt"
Dec 06 05:26:09 crc kubenswrapper[4706]: I1206 05:26:09.184010 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Dec 06 05:26:09 crc kubenswrapper[4706]: I1206 05:26:09.534796 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Dec 06 05:26:09 crc kubenswrapper[4706]: I1206 05:26:09.541121 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Dec 06 05:26:09 crc kubenswrapper[4706]: I1206 05:26:09.672109 4706 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Dec 06 05:26:09 crc kubenswrapper[4706]: I1206 05:26:09.866413 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Dec 06 05:26:10 crc kubenswrapper[4706]: I1206 05:26:10.012678 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://31ac2e1f79f5357becc5e25f15a7486781163702291fb725aa6c258f89467ba2" gracePeriod=5
Dec 06 05:26:10 crc kubenswrapper[4706]: I1206 05:26:10.159305 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Dec 06 05:26:10 crc kubenswrapper[4706]: I1206 05:26:10.317251 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Dec 06 05:26:10 crc kubenswrapper[4706]: I1206 05:26:10.659261 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt"
Dec 06 05:26:10 crc kubenswrapper[4706]: I1206 05:26:10.913096 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Dec 06 05:26:10 crc kubenswrapper[4706]: I1206 05:26:10.979641 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Dec 06 05:26:11 crc kubenswrapper[4706]: I1206 05:26:11.327618 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Dec 06 05:26:11 crc kubenswrapper[4706]: I1206 05:26:11.406201 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Dec 06 05:26:11 crc kubenswrapper[4706]: I1206 05:26:11.437991 4706 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Dec 06 05:26:11 crc kubenswrapper[4706]: I1206 05:26:11.885818 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Dec 06 05:26:11 crc kubenswrapper[4706]: I1206 05:26:11.943723 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Dec 06 05:26:12 crc kubenswrapper[4706]: I1206 05:26:12.043024 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Dec 06 05:26:12 crc kubenswrapper[4706]: I1206 05:26:12.627778 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Dec 06 05:26:13 crc kubenswrapper[4706]: I1206 05:26:13.037826 4706 generic.go:334] "Generic (PLEG): container finished" podID="e9405376-0114-4bee-b245-f17b30f2594a" containerID="128a6e82768f0cec748a1a7c63155a2de1ce3d17db05d8e9bc6454ebf5d1e6c3" exitCode=0
Dec 06 05:26:13 crc kubenswrapper[4706]: I1206 05:26:13.037896 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" event={"ID":"e9405376-0114-4bee-b245-f17b30f2594a","Type":"ContainerDied","Data":"128a6e82768f0cec748a1a7c63155a2de1ce3d17db05d8e9bc6454ebf5d1e6c3"}
Dec 06 05:26:13 crc kubenswrapper[4706]: I1206 05:26:13.038575 4706 scope.go:117] "RemoveContainer" containerID="128a6e82768f0cec748a1a7c63155a2de1ce3d17db05d8e9bc6454ebf5d1e6c3"
Dec 06 05:26:13 crc kubenswrapper[4706]: I1206 05:26:13.049015 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Dec 06 05:26:13 crc kubenswrapper[4706]: I1206 05:26:13.071570 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Dec 06 05:26:13 crc kubenswrapper[4706]: I1206 05:26:13.481369 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Dec 06 05:26:13 crc kubenswrapper[4706]: I1206 05:26:13.739844 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Dec 06 05:26:14 crc kubenswrapper[4706]: I1206 05:26:14.015623 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Dec 06 05:26:14 crc kubenswrapper[4706]: I1206 05:26:14.411438 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Dec 06 05:26:14 crc kubenswrapper[4706]: I1206 05:26:14.715824 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.051829 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.054983 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" event={"ID":"e9405376-0114-4bee-b245-f17b30f2594a","Type":"ContainerStarted","Data":"ba70467ae38738266184ad8e81351f9df2124d6019a72d305a83293933d793c6"}
Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.055407 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp"
Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.055838 4706 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-xptzp container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused" start-of-body=
Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.055890 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" podUID="e9405376-0114-4bee-b245-f17b30f2594a" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.36:8080/healthz\": dial tcp 10.217.0.36:8080: connect: connection refused"
Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.077061 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.090678 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1"
Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.202792 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.601394 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.601480 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.702268 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.783782 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.783844 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.783873 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.783920 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.783966 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.784229 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.784269 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a").
InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.784288 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.784675 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.791624 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.817361 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.832571 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.885738 4706 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.885778 4706 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.885790 4706 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.885801 4706 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Dec 06 05:26:15 crc kubenswrapper[4706]: I1206 05:26:15.885812 4706 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Dec 06 05:26:16 crc kubenswrapper[4706]: I1206 05:26:16.043798 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Dec 06 05:26:16 crc kubenswrapper[4706]: I1206 05:26:16.044309 4706 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Dec 06 05:26:16 crc kubenswrapper[4706]: I1206 05:26:16.054619 4706 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 06 05:26:16 crc kubenswrapper[4706]: I1206 05:26:16.054667 4706 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="ab744dac-39c6-4811-b731-60bd7ab6f360" Dec 06 05:26:16 crc kubenswrapper[4706]: I1206 05:26:16.060420 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Dec 06 05:26:16 crc kubenswrapper[4706]: I1206 05:26:16.060464 4706 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="ab744dac-39c6-4811-b731-60bd7ab6f360" Dec 06 05:26:16 crc kubenswrapper[4706]: I1206 05:26:16.062627 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Dec 06 05:26:16 crc kubenswrapper[4706]: I1206 05:26:16.062689 4706 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="31ac2e1f79f5357becc5e25f15a7486781163702291fb725aa6c258f89467ba2" exitCode=137 Dec 06 05:26:16 crc kubenswrapper[4706]: I1206 05:26:16.062792 4706 scope.go:117] "RemoveContainer" containerID="31ac2e1f79f5357becc5e25f15a7486781163702291fb725aa6c258f89467ba2" Dec 06 05:26:16 crc kubenswrapper[4706]: I1206 05:26:16.062795 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Dec 06 05:26:16 crc kubenswrapper[4706]: I1206 05:26:16.069502 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" Dec 06 05:26:16 crc kubenswrapper[4706]: I1206 05:26:16.080384 4706 scope.go:117] "RemoveContainer" containerID="31ac2e1f79f5357becc5e25f15a7486781163702291fb725aa6c258f89467ba2" Dec 06 05:26:16 crc kubenswrapper[4706]: E1206 05:26:16.080887 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31ac2e1f79f5357becc5e25f15a7486781163702291fb725aa6c258f89467ba2\": container with ID starting with 31ac2e1f79f5357becc5e25f15a7486781163702291fb725aa6c258f89467ba2 not found: ID does not exist" containerID="31ac2e1f79f5357becc5e25f15a7486781163702291fb725aa6c258f89467ba2" Dec 06 05:26:16 crc kubenswrapper[4706]: I1206 05:26:16.080929 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31ac2e1f79f5357becc5e25f15a7486781163702291fb725aa6c258f89467ba2"} err="failed to get container status \"31ac2e1f79f5357becc5e25f15a7486781163702291fb725aa6c258f89467ba2\": rpc error: code = NotFound desc = could not find container \"31ac2e1f79f5357becc5e25f15a7486781163702291fb725aa6c258f89467ba2\": container with ID starting with 31ac2e1f79f5357becc5e25f15a7486781163702291fb725aa6c258f89467ba2 not found: ID does not exist" Dec 06 05:26:16 crc kubenswrapper[4706]: I1206 05:26:16.954838 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Dec 06 05:26:17 crc kubenswrapper[4706]: I1206 05:26:17.035299 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:26:17 crc kubenswrapper[4706]: I1206 05:26:17.035927 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:26:17 crc kubenswrapper[4706]: I1206 05:26:17.236477 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-575cc5b957-jtdvz"] Dec 06 05:26:17 crc kubenswrapper[4706]: W1206 05:26:17.246267 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod22853a63_db92_4c54_91dd_245763bfa04b.slice/crio-3682a206b50763924d658fd66a55ebf37bce8a148608d43f298be467bb82c571 WatchSource:0}: Error finding container 3682a206b50763924d658fd66a55ebf37bce8a148608d43f298be467bb82c571: Status 404 returned error can't find the container with id 3682a206b50763924d658fd66a55ebf37bce8a148608d43f298be467bb82c571 Dec 06 05:26:17 crc kubenswrapper[4706]: I1206 05:26:17.530213 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Dec 06 05:26:18 crc kubenswrapper[4706]: I1206 05:26:18.078135 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" event={"ID":"22853a63-db92-4c54-91dd-245763bfa04b","Type":"ContainerStarted","Data":"af3e698adab691fb41c89971c3e2a7d100881912c213bfdaa91212d65d46d787"} Dec 06 05:26:18 crc kubenswrapper[4706]: I1206 05:26:18.078182 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" event={"ID":"22853a63-db92-4c54-91dd-245763bfa04b","Type":"ContainerStarted","Data":"3682a206b50763924d658fd66a55ebf37bce8a148608d43f298be467bb82c571"} Dec 06 05:26:18 crc kubenswrapper[4706]: I1206 05:26:18.078408 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:26:18 crc kubenswrapper[4706]: I1206 05:26:18.083662 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" Dec 06 05:26:18 crc kubenswrapper[4706]: I1206 05:26:18.098276 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-575cc5b957-jtdvz" podStartSLOduration=101.098255847 podStartE2EDuration="1m41.098255847s" podCreationTimestamp="2025-12-06 05:24:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:26:18.093930111 +0000 UTC m=+400.421754065" watchObservedRunningTime="2025-12-06 05:26:18.098255847 +0000 UTC m=+400.426079791" Dec 06 05:26:18 crc kubenswrapper[4706]: I1206 05:26:18.686081 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Dec 06 05:26:18 crc kubenswrapper[4706]: I1206 05:26:18.785371 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Dec 06 05:26:19 crc kubenswrapper[4706]: I1206 05:26:19.160942 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Dec 06 05:26:19 crc kubenswrapper[4706]: I1206 05:26:19.320446 4706 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Dec 06 05:26:21 crc kubenswrapper[4706]: I1206 05:26:21.109718 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Dec 06 05:26:35 crc kubenswrapper[4706]: I1206 05:26:35.962212 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:26:35 crc kubenswrapper[4706]: I1206 05:26:35.963237 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:27:05 crc kubenswrapper[4706]: I1206 05:27:05.136382 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-pmvgs"] Dec 06 05:27:05 crc kubenswrapper[4706]: I1206 05:27:05.137452 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" podUID="4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077" containerName="controller-manager" containerID="cri-o://939ce110ecb14d71635d7de21702ccd5ad434c7ac6cd72e9a3a20104b21e806e" gracePeriod=30 Dec 06 05:27:05 crc kubenswrapper[4706]: I1206 05:27:05.226950 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph"] Dec 06 05:27:05 crc kubenswrapper[4706]: I1206 05:27:05.227675 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" podUID="6b76376c-f080-4458-a87a-84eab1e4b86d" containerName="route-controller-manager" containerID="cri-o://9dd5abb96dc7d7756c8b52bdc845b1deeb2c31a44e5990ce12bcd854601d538d" gracePeriod=30 Dec 06 05:27:05 crc kubenswrapper[4706]: I1206 05:27:05.440994 4706 generic.go:334] "Generic (PLEG): container finished" podID="6b76376c-f080-4458-a87a-84eab1e4b86d" containerID="9dd5abb96dc7d7756c8b52bdc845b1deeb2c31a44e5990ce12bcd854601d538d" exitCode=0 Dec 06 05:27:05 crc kubenswrapper[4706]: I1206 05:27:05.441077 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" event={"ID":"6b76376c-f080-4458-a87a-84eab1e4b86d","Type":"ContainerDied","Data":"9dd5abb96dc7d7756c8b52bdc845b1deeb2c31a44e5990ce12bcd854601d538d"} Dec 06 05:27:05 crc kubenswrapper[4706]: I1206 05:27:05.442799 4706 generic.go:334] "Generic (PLEG): container finished" podID="4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077" containerID="939ce110ecb14d71635d7de21702ccd5ad434c7ac6cd72e9a3a20104b21e806e" exitCode=0 Dec 06 05:27:05 crc kubenswrapper[4706]: I1206 05:27:05.442847 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" event={"ID":"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077","Type":"ContainerDied","Data":"939ce110ecb14d71635d7de21702ccd5ad434c7ac6cd72e9a3a20104b21e806e"} Dec 06 05:27:05 crc kubenswrapper[4706]: I1206 05:27:05.961665 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn 
container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:27:05 crc kubenswrapper[4706]: I1206 05:27:05.962129 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:27:05 crc kubenswrapper[4706]: I1206 05:27:05.962175 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:27:05 crc kubenswrapper[4706]: I1206 05:27:05.963083 4706 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a50b611b00cc5b19681640fa0163c59ec199ee057feb6e3aa5bd246ae8a33948"} pod="openshift-machine-config-operator/machine-config-daemon-z27rn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 06 05:27:05 crc kubenswrapper[4706]: I1206 05:27:05.963135 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" containerID="cri-o://a50b611b00cc5b19681640fa0163c59ec199ee057feb6e3aa5bd246ae8a33948" gracePeriod=600 Dec 06 05:27:05 crc kubenswrapper[4706]: I1206 05:27:05.976774 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwthx"] Dec 06 05:27:05 crc kubenswrapper[4706]: I1206 05:27:05.977124 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dwthx" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" containerName="registry-server" containerID="cri-o://f4ac9748d2b4ef06e2f325acd0ada24c069adcd75625e4a6d506e9752d27010c" gracePeriod=2 Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.092247 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.173422 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r2clj"] Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.173742 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-r2clj" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" containerName="registry-server" containerID="cri-o://5698dbacd7c2111e8f308ce87325b1cc62f9aaa78b83be6593dfc129551b2c10" gracePeriod=2 Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.211395 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.260328 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-client-ca\") pod \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.266740 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-serving-cert\") pod \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.266866 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-config\") pod \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.266949 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z4btx\" (UniqueName: \"kubernetes.io/projected/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-kube-api-access-z4btx\") pod \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.267000 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-proxy-ca-bundles\") pod \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\" (UID: \"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077\") " Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.267661 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-client-ca" (OuterVolumeSpecName: "client-ca") pod "4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077" (UID: "4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.267880 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-config" (OuterVolumeSpecName: "config") pod "4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077" (UID: "4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.268557 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077" (UID: "4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.275946 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-kube-api-access-z4btx" (OuterVolumeSpecName: "kube-api-access-z4btx") pod "4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077" (UID: "4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077"). 
InnerVolumeSpecName "kube-api-access-z4btx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.276411 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077" (UID: "4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.368098 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6b76376c-f080-4458-a87a-84eab1e4b86d-client-ca\") pod \"6b76376c-f080-4458-a87a-84eab1e4b86d\" (UID: \"6b76376c-f080-4458-a87a-84eab1e4b86d\") " Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.368587 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b76376c-f080-4458-a87a-84eab1e4b86d-config\") pod \"6b76376c-f080-4458-a87a-84eab1e4b86d\" (UID: \"6b76376c-f080-4458-a87a-84eab1e4b86d\") " Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.368640 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b76376c-f080-4458-a87a-84eab1e4b86d-serving-cert\") pod \"6b76376c-f080-4458-a87a-84eab1e4b86d\" (UID: \"6b76376c-f080-4458-a87a-84eab1e4b86d\") " Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.368669 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kp268\" (UniqueName: \"kubernetes.io/projected/6b76376c-f080-4458-a87a-84eab1e4b86d-kube-api-access-kp268\") pod \"6b76376c-f080-4458-a87a-84eab1e4b86d\" (UID: \"6b76376c-f080-4458-a87a-84eab1e4b86d\") " Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.368930 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.368965 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b76376c-f080-4458-a87a-84eab1e4b86d-client-ca" (OuterVolumeSpecName: "client-ca") pod "6b76376c-f080-4458-a87a-84eab1e4b86d" (UID: "6b76376c-f080-4458-a87a-84eab1e4b86d"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.369359 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b76376c-f080-4458-a87a-84eab1e4b86d-config" (OuterVolumeSpecName: "config") pod "6b76376c-f080-4458-a87a-84eab1e4b86d" (UID: "6b76376c-f080-4458-a87a-84eab1e4b86d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.369993 4706 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.370026 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4btx\" (UniqueName: \"kubernetes.io/projected/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-kube-api-access-z4btx\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.370105 4706 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-client-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.370125 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.373688 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b76376c-f080-4458-a87a-84eab1e4b86d-kube-api-access-kp268" (OuterVolumeSpecName: "kube-api-access-kp268") pod "6b76376c-f080-4458-a87a-84eab1e4b86d" (UID: "6b76376c-f080-4458-a87a-84eab1e4b86d"). InnerVolumeSpecName "kube-api-access-kp268". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.374260 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b76376c-f080-4458-a87a-84eab1e4b86d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6b76376c-f080-4458-a87a-84eab1e4b86d" (UID: "6b76376c-f080-4458-a87a-84eab1e4b86d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.451343 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" event={"ID":"4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077","Type":"ContainerDied","Data":"94a430b52caaabe00b9c80646b9e3420fd831cf54f69b6c04ce0fa2d65c1a9a8"} Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.451395 4706 scope.go:117] "RemoveContainer" containerID="939ce110ecb14d71635d7de21702ccd5ad434c7ac6cd72e9a3a20104b21e806e" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.451704 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-pmvgs" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.453796 4706 generic.go:334] "Generic (PLEG): container finished" podID="cf5c1feb-f09b-41c2-9974-56538ccc281f" containerID="f4ac9748d2b4ef06e2f325acd0ada24c069adcd75625e4a6d506e9752d27010c" exitCode=0 Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.453868 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwthx" event={"ID":"cf5c1feb-f09b-41c2-9974-56538ccc281f","Type":"ContainerDied","Data":"f4ac9748d2b4ef06e2f325acd0ada24c069adcd75625e4a6d506e9752d27010c"} Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.457085 4706 generic.go:334] "Generic (PLEG): container finished" podID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerID="a50b611b00cc5b19681640fa0163c59ec199ee057feb6e3aa5bd246ae8a33948" exitCode=0 Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.457151 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerDied","Data":"a50b611b00cc5b19681640fa0163c59ec199ee057feb6e3aa5bd246ae8a33948"} Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.458555 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" event={"ID":"6b76376c-f080-4458-a87a-84eab1e4b86d","Type":"ContainerDied","Data":"0fa95df1eb46491aad1f938a7804239b8ac2fc4cc48e9663d38a0fc5c43aea18"} Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.458647 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.471669 4706 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/6b76376c-f080-4458-a87a-84eab1e4b86d-client-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.471696 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b76376c-f080-4458-a87a-84eab1e4b86d-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.471707 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6b76376c-f080-4458-a87a-84eab1e4b86d-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.471716 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kp268\" (UniqueName: \"kubernetes.io/projected/6b76376c-f080-4458-a87a-84eab1e4b86d-kube-api-access-kp268\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.474741 4706 scope.go:117] "RemoveContainer" containerID="f6f5a1a1f15d769ec15e828053fb56eb73a83655194601c517a659f819d2953e" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.483539 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-pmvgs"] Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.487780 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-pmvgs"] Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.492200 4706 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph"] Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.495962 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-jslph"] Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.652668 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-565f8b58b8-74k75"] Dec 06 05:27:06 crc kubenswrapper[4706]: E1206 05:27:06.653230 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b76376c-f080-4458-a87a-84eab1e4b86d" containerName="route-controller-manager" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.653304 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b76376c-f080-4458-a87a-84eab1e4b86d" containerName="route-controller-manager" Dec 06 05:27:06 crc kubenswrapper[4706]: E1206 05:27:06.653377 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077" containerName="controller-manager" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.653432 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077" containerName="controller-manager" Dec 06 05:27:06 crc kubenswrapper[4706]: E1206 05:27:06.653500 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.653561 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.653716 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077" containerName="controller-manager" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.653787 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b76376c-f080-4458-a87a-84eab1e4b86d" containerName="route-controller-manager" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.653850 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.654341 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.656058 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw"] Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.656709 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.659702 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.659851 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.660259 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.660554 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.660672 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.660780 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.660887 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.660976 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.661080 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.661436 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.661531 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.661634 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.666701 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw"] Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.671502 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-565f8b58b8-74k75"] Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.759996 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-proxy-ca-bundles\") pod \"controller-manager-565f8b58b8-74k75\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.760064 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-config\") pod \"controller-manager-565f8b58b8-74k75\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " 
pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.760091 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x65c8\" (UniqueName: \"kubernetes.io/projected/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-kube-api-access-x65c8\") pod \"controller-manager-565f8b58b8-74k75\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.760116 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f2be3fde-71b1-4b34-9179-52ca2861cb46-serving-cert\") pod \"route-controller-manager-7948c9bcdd-plbvw\" (UID: \"f2be3fde-71b1-4b34-9179-52ca2861cb46\") " pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.760132 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2be3fde-71b1-4b34-9179-52ca2861cb46-config\") pod \"route-controller-manager-7948c9bcdd-plbvw\" (UID: \"f2be3fde-71b1-4b34-9179-52ca2861cb46\") " pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.760150 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f2be3fde-71b1-4b34-9179-52ca2861cb46-client-ca\") pod \"route-controller-manager-7948c9bcdd-plbvw\" (UID: \"f2be3fde-71b1-4b34-9179-52ca2861cb46\") " pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.760188 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-client-ca\") pod \"controller-manager-565f8b58b8-74k75\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.760209 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bv5jp\" (UniqueName: \"kubernetes.io/projected/f2be3fde-71b1-4b34-9179-52ca2861cb46-kube-api-access-bv5jp\") pod \"route-controller-manager-7948c9bcdd-plbvw\" (UID: \"f2be3fde-71b1-4b34-9179-52ca2861cb46\") " pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.760230 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-serving-cert\") pod \"controller-manager-565f8b58b8-74k75\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.761789 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.860887 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-proxy-ca-bundles\") pod \"controller-manager-565f8b58b8-74k75\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.860954 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-config\") pod \"controller-manager-565f8b58b8-74k75\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.860985 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x65c8\" (UniqueName: \"kubernetes.io/projected/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-kube-api-access-x65c8\") pod \"controller-manager-565f8b58b8-74k75\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.861010 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f2be3fde-71b1-4b34-9179-52ca2861cb46-serving-cert\") pod \"route-controller-manager-7948c9bcdd-plbvw\" (UID: \"f2be3fde-71b1-4b34-9179-52ca2861cb46\") " pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.861030 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2be3fde-71b1-4b34-9179-52ca2861cb46-config\") pod \"route-controller-manager-7948c9bcdd-plbvw\" (UID: \"f2be3fde-71b1-4b34-9179-52ca2861cb46\") " pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.861063 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f2be3fde-71b1-4b34-9179-52ca2861cb46-client-ca\") pod \"route-controller-manager-7948c9bcdd-plbvw\" (UID: \"f2be3fde-71b1-4b34-9179-52ca2861cb46\") " pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.861093 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-client-ca\") pod \"controller-manager-565f8b58b8-74k75\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.861119 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bv5jp\" (UniqueName: \"kubernetes.io/projected/f2be3fde-71b1-4b34-9179-52ca2861cb46-kube-api-access-bv5jp\") pod \"route-controller-manager-7948c9bcdd-plbvw\" (UID: \"f2be3fde-71b1-4b34-9179-52ca2861cb46\") " pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.861138 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-serving-cert\") pod 
\"controller-manager-565f8b58b8-74k75\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.862280 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-client-ca\") pod \"controller-manager-565f8b58b8-74k75\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.862299 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f2be3fde-71b1-4b34-9179-52ca2861cb46-client-ca\") pod \"route-controller-manager-7948c9bcdd-plbvw\" (UID: \"f2be3fde-71b1-4b34-9179-52ca2861cb46\") " pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.862278 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-proxy-ca-bundles\") pod \"controller-manager-565f8b58b8-74k75\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.862523 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-config\") pod \"controller-manager-565f8b58b8-74k75\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.862817 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2be3fde-71b1-4b34-9179-52ca2861cb46-config\") pod \"route-controller-manager-7948c9bcdd-plbvw\" (UID: \"f2be3fde-71b1-4b34-9179-52ca2861cb46\") " pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.864547 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-serving-cert\") pod \"controller-manager-565f8b58b8-74k75\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.865355 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f2be3fde-71b1-4b34-9179-52ca2861cb46-serving-cert\") pod \"route-controller-manager-7948c9bcdd-plbvw\" (UID: \"f2be3fde-71b1-4b34-9179-52ca2861cb46\") " pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 05:27:06.877875 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bv5jp\" (UniqueName: \"kubernetes.io/projected/f2be3fde-71b1-4b34-9179-52ca2861cb46-kube-api-access-bv5jp\") pod \"route-controller-manager-7948c9bcdd-plbvw\" (UID: \"f2be3fde-71b1-4b34-9179-52ca2861cb46\") " pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" Dec 06 05:27:06 crc kubenswrapper[4706]: I1206 
05:27:06.878496 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x65c8\" (UniqueName: \"kubernetes.io/projected/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-kube-api-access-x65c8\") pod \"controller-manager-565f8b58b8-74k75\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:07 crc kubenswrapper[4706]: I1206 05:27:07.110078 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:07 crc kubenswrapper[4706]: I1206 05:27:07.115253 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" Dec 06 05:27:07 crc kubenswrapper[4706]: I1206 05:27:07.360652 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw"] Dec 06 05:27:07 crc kubenswrapper[4706]: I1206 05:27:07.437548 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-565f8b58b8-74k75"] Dec 06 05:27:07 crc kubenswrapper[4706]: W1206 05:27:07.447569 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f5b7e23_6e82_4bbe_a0b3_c4ab279acf6d.slice/crio-21cbdd034e32bc12dd6981a8ec0dcb9e4fe21c81426437f06d34e31408fe783a WatchSource:0}: Error finding container 21cbdd034e32bc12dd6981a8ec0dcb9e4fe21c81426437f06d34e31408fe783a: Status 404 returned error can't find the container with id 21cbdd034e32bc12dd6981a8ec0dcb9e4fe21c81426437f06d34e31408fe783a Dec 06 05:27:07 crc kubenswrapper[4706]: I1206 05:27:07.465192 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"d41cbbb0ceb6ccc8501ce4b75011f83163d456684ff13944b7d6b7c128f476e3"} Dec 06 05:27:07 crc kubenswrapper[4706]: I1206 05:27:07.465996 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" event={"ID":"f2be3fde-71b1-4b34-9179-52ca2861cb46","Type":"ContainerStarted","Data":"0b3141a579cdcb0eb390c55bd638be5ea9a5f1d4452275af182e5afe57a07808"} Dec 06 05:27:07 crc kubenswrapper[4706]: I1206 05:27:07.468911 4706 generic.go:334] "Generic (PLEG): container finished" podID="52e328e7-19c9-4412-96f0-582cd5add7c5" containerID="5698dbacd7c2111e8f308ce87325b1cc62f9aaa78b83be6593dfc129551b2c10" exitCode=0 Dec 06 05:27:07 crc kubenswrapper[4706]: I1206 05:27:07.468950 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2clj" event={"ID":"52e328e7-19c9-4412-96f0-582cd5add7c5","Type":"ContainerDied","Data":"5698dbacd7c2111e8f308ce87325b1cc62f9aaa78b83be6593dfc129551b2c10"} Dec 06 05:27:07 crc kubenswrapper[4706]: I1206 05:27:07.469727 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" event={"ID":"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d","Type":"ContainerStarted","Data":"21cbdd034e32bc12dd6981a8ec0dcb9e4fe21c81426437f06d34e31408fe783a"} Dec 06 05:27:07 crc kubenswrapper[4706]: I1206 05:27:07.734192 4706 scope.go:117] "RemoveContainer" containerID="9dd5abb96dc7d7756c8b52bdc845b1deeb2c31a44e5990ce12bcd854601d538d" Dec 06 05:27:08 crc 
kubenswrapper[4706]: I1206 05:27:08.053666 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077" path="/var/lib/kubelet/pods/4dfbfadd-8faa-4b55-b8a4-5b5bf8e5c077/volumes" Dec 06 05:27:08 crc kubenswrapper[4706]: I1206 05:27:08.054476 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b76376c-f080-4458-a87a-84eab1e4b86d" path="/var/lib/kubelet/pods/6b76376c-f080-4458-a87a-84eab1e4b86d/volumes" Dec 06 05:27:08 crc kubenswrapper[4706]: E1206 05:27:08.628517 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f4ac9748d2b4ef06e2f325acd0ada24c069adcd75625e4a6d506e9752d27010c is running failed: container process not found" containerID="f4ac9748d2b4ef06e2f325acd0ada24c069adcd75625e4a6d506e9752d27010c" cmd=["grpc_health_probe","-addr=:50051"] Dec 06 05:27:08 crc kubenswrapper[4706]: E1206 05:27:08.629254 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f4ac9748d2b4ef06e2f325acd0ada24c069adcd75625e4a6d506e9752d27010c is running failed: container process not found" containerID="f4ac9748d2b4ef06e2f325acd0ada24c069adcd75625e4a6d506e9752d27010c" cmd=["grpc_health_probe","-addr=:50051"] Dec 06 05:27:08 crc kubenswrapper[4706]: E1206 05:27:08.629727 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f4ac9748d2b4ef06e2f325acd0ada24c069adcd75625e4a6d506e9752d27010c is running failed: container process not found" containerID="f4ac9748d2b4ef06e2f325acd0ada24c069adcd75625e4a6d506e9752d27010c" cmd=["grpc_health_probe","-addr=:50051"] Dec 06 05:27:08 crc kubenswrapper[4706]: E1206 05:27:08.629779 4706 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of f4ac9748d2b4ef06e2f325acd0ada24c069adcd75625e4a6d506e9752d27010c is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-dwthx" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" containerName="registry-server" Dec 06 05:27:08 crc kubenswrapper[4706]: I1206 05:27:08.881434 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-565f8b58b8-74k75"] Dec 06 05:27:08 crc kubenswrapper[4706]: I1206 05:27:08.885502 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw"] Dec 06 05:27:09 crc kubenswrapper[4706]: I1206 05:27:09.455198 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dwthx" Dec 06 05:27:09 crc kubenswrapper[4706]: I1206 05:27:09.497598 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dwthx" event={"ID":"cf5c1feb-f09b-41c2-9974-56538ccc281f","Type":"ContainerDied","Data":"9bed54aa84ef664d23bd59f8630b572457303dfcf83bdee0fd2fc918869dd22d"} Dec 06 05:27:09 crc kubenswrapper[4706]: I1206 05:27:09.497660 4706 scope.go:117] "RemoveContainer" containerID="f4ac9748d2b4ef06e2f325acd0ada24c069adcd75625e4a6d506e9752d27010c" Dec 06 05:27:09 crc kubenswrapper[4706]: I1206 05:27:09.497821 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dwthx" Dec 06 05:27:09 crc kubenswrapper[4706]: I1206 05:27:09.521066 4706 scope.go:117] "RemoveContainer" containerID="db2cb9c9f79e99b736443dd2bcbab250397fb94041572c0a35cc366776112e58" Dec 06 05:27:09 crc kubenswrapper[4706]: I1206 05:27:09.536546 4706 scope.go:117] "RemoveContainer" containerID="61257864046feff646caad462392480e6d5a416200b3db0009bd6a207e692d68" Dec 06 05:27:09 crc kubenswrapper[4706]: I1206 05:27:09.601762 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf5c1feb-f09b-41c2-9974-56538ccc281f-utilities\") pod \"cf5c1feb-f09b-41c2-9974-56538ccc281f\" (UID: \"cf5c1feb-f09b-41c2-9974-56538ccc281f\") " Dec 06 05:27:09 crc kubenswrapper[4706]: I1206 05:27:09.601844 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngsgc\" (UniqueName: \"kubernetes.io/projected/cf5c1feb-f09b-41c2-9974-56538ccc281f-kube-api-access-ngsgc\") pod \"cf5c1feb-f09b-41c2-9974-56538ccc281f\" (UID: \"cf5c1feb-f09b-41c2-9974-56538ccc281f\") " Dec 06 05:27:09 crc kubenswrapper[4706]: I1206 05:27:09.601951 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf5c1feb-f09b-41c2-9974-56538ccc281f-catalog-content\") pod \"cf5c1feb-f09b-41c2-9974-56538ccc281f\" (UID: \"cf5c1feb-f09b-41c2-9974-56538ccc281f\") " Dec 06 05:27:09 crc kubenswrapper[4706]: I1206 05:27:09.602865 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf5c1feb-f09b-41c2-9974-56538ccc281f-utilities" (OuterVolumeSpecName: "utilities") pod "cf5c1feb-f09b-41c2-9974-56538ccc281f" (UID: "cf5c1feb-f09b-41c2-9974-56538ccc281f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:27:09 crc kubenswrapper[4706]: I1206 05:27:09.607813 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf5c1feb-f09b-41c2-9974-56538ccc281f-kube-api-access-ngsgc" (OuterVolumeSpecName: "kube-api-access-ngsgc") pod "cf5c1feb-f09b-41c2-9974-56538ccc281f" (UID: "cf5c1feb-f09b-41c2-9974-56538ccc281f"). InnerVolumeSpecName "kube-api-access-ngsgc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:27:09 crc kubenswrapper[4706]: E1206 05:27:09.608588 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5698dbacd7c2111e8f308ce87325b1cc62f9aaa78b83be6593dfc129551b2c10 is running failed: container process not found" containerID="5698dbacd7c2111e8f308ce87325b1cc62f9aaa78b83be6593dfc129551b2c10" cmd=["grpc_health_probe","-addr=:50051"] Dec 06 05:27:09 crc kubenswrapper[4706]: E1206 05:27:09.609243 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5698dbacd7c2111e8f308ce87325b1cc62f9aaa78b83be6593dfc129551b2c10 is running failed: container process not found" containerID="5698dbacd7c2111e8f308ce87325b1cc62f9aaa78b83be6593dfc129551b2c10" cmd=["grpc_health_probe","-addr=:50051"] Dec 06 05:27:09 crc kubenswrapper[4706]: E1206 05:27:09.609863 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5698dbacd7c2111e8f308ce87325b1cc62f9aaa78b83be6593dfc129551b2c10 is running failed: container process not found" containerID="5698dbacd7c2111e8f308ce87325b1cc62f9aaa78b83be6593dfc129551b2c10" cmd=["grpc_health_probe","-addr=:50051"] Dec 06 05:27:09 crc kubenswrapper[4706]: E1206 05:27:09.609946 4706 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5698dbacd7c2111e8f308ce87325b1cc62f9aaa78b83be6593dfc129551b2c10 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-r2clj" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" containerName="registry-server" Dec 06 05:27:09 crc kubenswrapper[4706]: I1206 05:27:09.634416 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf5c1feb-f09b-41c2-9974-56538ccc281f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cf5c1feb-f09b-41c2-9974-56538ccc281f" (UID: "cf5c1feb-f09b-41c2-9974-56538ccc281f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:27:09 crc kubenswrapper[4706]: I1206 05:27:09.703821 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf5c1feb-f09b-41c2-9974-56538ccc281f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:09 crc kubenswrapper[4706]: I1206 05:27:09.703849 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf5c1feb-f09b-41c2-9974-56538ccc281f-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:09 crc kubenswrapper[4706]: I1206 05:27:09.703860 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngsgc\" (UniqueName: \"kubernetes.io/projected/cf5c1feb-f09b-41c2-9974-56538ccc281f-kube-api-access-ngsgc\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:09 crc kubenswrapper[4706]: I1206 05:27:09.826275 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwthx"] Dec 06 05:27:09 crc kubenswrapper[4706]: I1206 05:27:09.832358 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dwthx"] Dec 06 05:27:10 crc kubenswrapper[4706]: I1206 05:27:10.048938 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" path="/var/lib/kubelet/pods/cf5c1feb-f09b-41c2-9974-56538ccc281f/volumes" Dec 06 05:27:11 crc kubenswrapper[4706]: I1206 05:27:11.768635 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r2clj" Dec 06 05:27:11 crc kubenswrapper[4706]: I1206 05:27:11.931996 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52e328e7-19c9-4412-96f0-582cd5add7c5-catalog-content\") pod \"52e328e7-19c9-4412-96f0-582cd5add7c5\" (UID: \"52e328e7-19c9-4412-96f0-582cd5add7c5\") " Dec 06 05:27:11 crc kubenswrapper[4706]: I1206 05:27:11.932073 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52e328e7-19c9-4412-96f0-582cd5add7c5-utilities\") pod \"52e328e7-19c9-4412-96f0-582cd5add7c5\" (UID: \"52e328e7-19c9-4412-96f0-582cd5add7c5\") " Dec 06 05:27:11 crc kubenswrapper[4706]: I1206 05:27:11.932153 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89xk8\" (UniqueName: \"kubernetes.io/projected/52e328e7-19c9-4412-96f0-582cd5add7c5-kube-api-access-89xk8\") pod \"52e328e7-19c9-4412-96f0-582cd5add7c5\" (UID: \"52e328e7-19c9-4412-96f0-582cd5add7c5\") " Dec 06 05:27:11 crc kubenswrapper[4706]: I1206 05:27:11.934137 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52e328e7-19c9-4412-96f0-582cd5add7c5-utilities" (OuterVolumeSpecName: "utilities") pod "52e328e7-19c9-4412-96f0-582cd5add7c5" (UID: "52e328e7-19c9-4412-96f0-582cd5add7c5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:27:11 crc kubenswrapper[4706]: I1206 05:27:11.936563 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52e328e7-19c9-4412-96f0-582cd5add7c5-kube-api-access-89xk8" (OuterVolumeSpecName: "kube-api-access-89xk8") pod "52e328e7-19c9-4412-96f0-582cd5add7c5" (UID: "52e328e7-19c9-4412-96f0-582cd5add7c5"). 
InnerVolumeSpecName "kube-api-access-89xk8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:27:12 crc kubenswrapper[4706]: I1206 05:27:12.033845 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/52e328e7-19c9-4412-96f0-582cd5add7c5-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:12 crc kubenswrapper[4706]: I1206 05:27:12.034232 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89xk8\" (UniqueName: \"kubernetes.io/projected/52e328e7-19c9-4412-96f0-582cd5add7c5-kube-api-access-89xk8\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:12 crc kubenswrapper[4706]: I1206 05:27:12.056823 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52e328e7-19c9-4412-96f0-582cd5add7c5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "52e328e7-19c9-4412-96f0-582cd5add7c5" (UID: "52e328e7-19c9-4412-96f0-582cd5add7c5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:27:12 crc kubenswrapper[4706]: I1206 05:27:12.134958 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/52e328e7-19c9-4412-96f0-582cd5add7c5-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:12 crc kubenswrapper[4706]: I1206 05:27:12.519998 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r2clj" Dec 06 05:27:12 crc kubenswrapper[4706]: I1206 05:27:12.519996 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r2clj" event={"ID":"52e328e7-19c9-4412-96f0-582cd5add7c5","Type":"ContainerDied","Data":"2f7012bed2d7108013af6192deca337367240d4c707d579a0fdcfa92d0671509"} Dec 06 05:27:12 crc kubenswrapper[4706]: I1206 05:27:12.520565 4706 scope.go:117] "RemoveContainer" containerID="5698dbacd7c2111e8f308ce87325b1cc62f9aaa78b83be6593dfc129551b2c10" Dec 06 05:27:12 crc kubenswrapper[4706]: I1206 05:27:12.544133 4706 scope.go:117] "RemoveContainer" containerID="ea614a3bb659c8af872ae774c19993e551d90992b3e281ddcf9f6598e67abf98" Dec 06 05:27:12 crc kubenswrapper[4706]: I1206 05:27:12.568321 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r2clj"] Dec 06 05:27:12 crc kubenswrapper[4706]: I1206 05:27:12.576859 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-r2clj"] Dec 06 05:27:12 crc kubenswrapper[4706]: I1206 05:27:12.586686 4706 scope.go:117] "RemoveContainer" containerID="9edf981cdcd123f3434bfef0dcf6280fd6948a0ca19ee573a57a44d4c5d73df7" Dec 06 05:27:13 crc kubenswrapper[4706]: I1206 05:27:13.527425 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" event={"ID":"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d","Type":"ContainerStarted","Data":"4054a8efbfc53b05a87239e319487131e1eea38db8e08ee2bfcc5227cc2b1502"} Dec 06 05:27:13 crc kubenswrapper[4706]: I1206 05:27:13.528540 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" event={"ID":"f2be3fde-71b1-4b34-9179-52ca2861cb46","Type":"ContainerStarted","Data":"aa57bec4f0bcbaa06ef2bff7c72c9397db58a1004d7d81a4a0054f564a524be0"} Dec 06 05:27:13 crc kubenswrapper[4706]: I1206 05:27:13.528661 4706 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" podUID="f2be3fde-71b1-4b34-9179-52ca2861cb46" containerName="route-controller-manager" containerID="cri-o://aa57bec4f0bcbaa06ef2bff7c72c9397db58a1004d7d81a4a0054f564a524be0" gracePeriod=30 Dec 06 05:27:13 crc kubenswrapper[4706]: I1206 05:27:13.528831 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" Dec 06 05:27:13 crc kubenswrapper[4706]: I1206 05:27:13.529326 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" podUID="5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d" containerName="controller-manager" containerID="cri-o://4054a8efbfc53b05a87239e319487131e1eea38db8e08ee2bfcc5227cc2b1502" gracePeriod=30 Dec 06 05:27:13 crc kubenswrapper[4706]: I1206 05:27:13.529434 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:13 crc kubenswrapper[4706]: I1206 05:27:13.535203 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:13 crc kubenswrapper[4706]: I1206 05:27:13.537026 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" Dec 06 05:27:13 crc kubenswrapper[4706]: I1206 05:27:13.553612 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" podStartSLOduration=8.553590597 podStartE2EDuration="8.553590597s" podCreationTimestamp="2025-12-06 05:27:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:27:13.547066033 +0000 UTC m=+455.874889997" watchObservedRunningTime="2025-12-06 05:27:13.553590597 +0000 UTC m=+455.881414551" Dec 06 05:27:13 crc kubenswrapper[4706]: I1206 05:27:13.608808 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" podStartSLOduration=8.608786106 podStartE2EDuration="8.608786106s" podCreationTimestamp="2025-12-06 05:27:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:27:13.607581474 +0000 UTC m=+455.935405418" watchObservedRunningTime="2025-12-06 05:27:13.608786106 +0000 UTC m=+455.936610050" Dec 06 05:27:14 crc kubenswrapper[4706]: I1206 05:27:14.048029 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" path="/var/lib/kubelet/pods/52e328e7-19c9-4412-96f0-582cd5add7c5/volumes" Dec 06 05:27:14 crc kubenswrapper[4706]: I1206 05:27:14.539483 4706 generic.go:334] "Generic (PLEG): container finished" podID="f2be3fde-71b1-4b34-9179-52ca2861cb46" containerID="aa57bec4f0bcbaa06ef2bff7c72c9397db58a1004d7d81a4a0054f564a524be0" exitCode=0 Dec 06 05:27:14 crc kubenswrapper[4706]: I1206 05:27:14.539633 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" 
event={"ID":"f2be3fde-71b1-4b34-9179-52ca2861cb46","Type":"ContainerDied","Data":"aa57bec4f0bcbaa06ef2bff7c72c9397db58a1004d7d81a4a0054f564a524be0"} Dec 06 05:27:14 crc kubenswrapper[4706]: I1206 05:27:14.542643 4706 generic.go:334] "Generic (PLEG): container finished" podID="5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d" containerID="4054a8efbfc53b05a87239e319487131e1eea38db8e08ee2bfcc5227cc2b1502" exitCode=0 Dec 06 05:27:14 crc kubenswrapper[4706]: I1206 05:27:14.542710 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" event={"ID":"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d","Type":"ContainerDied","Data":"4054a8efbfc53b05a87239e319487131e1eea38db8e08ee2bfcc5227cc2b1502"} Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.351675 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.416206 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.475891 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f2be3fde-71b1-4b34-9179-52ca2861cb46-client-ca\") pod \"f2be3fde-71b1-4b34-9179-52ca2861cb46\" (UID: \"f2be3fde-71b1-4b34-9179-52ca2861cb46\") " Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.475984 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bv5jp\" (UniqueName: \"kubernetes.io/projected/f2be3fde-71b1-4b34-9179-52ca2861cb46-kube-api-access-bv5jp\") pod \"f2be3fde-71b1-4b34-9179-52ca2861cb46\" (UID: \"f2be3fde-71b1-4b34-9179-52ca2861cb46\") " Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.476052 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f2be3fde-71b1-4b34-9179-52ca2861cb46-serving-cert\") pod \"f2be3fde-71b1-4b34-9179-52ca2861cb46\" (UID: \"f2be3fde-71b1-4b34-9179-52ca2861cb46\") " Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.476159 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2be3fde-71b1-4b34-9179-52ca2861cb46-config\") pod \"f2be3fde-71b1-4b34-9179-52ca2861cb46\" (UID: \"f2be3fde-71b1-4b34-9179-52ca2861cb46\") " Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.476811 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2be3fde-71b1-4b34-9179-52ca2861cb46-client-ca" (OuterVolumeSpecName: "client-ca") pod "f2be3fde-71b1-4b34-9179-52ca2861cb46" (UID: "f2be3fde-71b1-4b34-9179-52ca2861cb46"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.476848 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2be3fde-71b1-4b34-9179-52ca2861cb46-config" (OuterVolumeSpecName: "config") pod "f2be3fde-71b1-4b34-9179-52ca2861cb46" (UID: "f2be3fde-71b1-4b34-9179-52ca2861cb46"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.481778 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2be3fde-71b1-4b34-9179-52ca2861cb46-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f2be3fde-71b1-4b34-9179-52ca2861cb46" (UID: "f2be3fde-71b1-4b34-9179-52ca2861cb46"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.481786 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2be3fde-71b1-4b34-9179-52ca2861cb46-kube-api-access-bv5jp" (OuterVolumeSpecName: "kube-api-access-bv5jp") pod "f2be3fde-71b1-4b34-9179-52ca2861cb46" (UID: "f2be3fde-71b1-4b34-9179-52ca2861cb46"). InnerVolumeSpecName "kube-api-access-bv5jp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.553394 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" event={"ID":"f2be3fde-71b1-4b34-9179-52ca2861cb46","Type":"ContainerDied","Data":"0b3141a579cdcb0eb390c55bd638be5ea9a5f1d4452275af182e5afe57a07808"} Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.553430 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.553465 4706 scope.go:117] "RemoveContainer" containerID="aa57bec4f0bcbaa06ef2bff7c72c9397db58a1004d7d81a4a0054f564a524be0" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.557069 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" event={"ID":"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d","Type":"ContainerDied","Data":"21cbdd034e32bc12dd6981a8ec0dcb9e4fe21c81426437f06d34e31408fe783a"} Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.557154 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-565f8b58b8-74k75" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.574506 4706 scope.go:117] "RemoveContainer" containerID="4054a8efbfc53b05a87239e319487131e1eea38db8e08ee2bfcc5227cc2b1502" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.576823 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x65c8\" (UniqueName: \"kubernetes.io/projected/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-kube-api-access-x65c8\") pod \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.576873 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-serving-cert\") pod \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.576963 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-config\") pod \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.577070 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-proxy-ca-bundles\") pod \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.577097 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-client-ca\") pod \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\" (UID: \"5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d\") " Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.577301 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bv5jp\" (UniqueName: \"kubernetes.io/projected/f2be3fde-71b1-4b34-9179-52ca2861cb46-kube-api-access-bv5jp\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.577319 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f2be3fde-71b1-4b34-9179-52ca2861cb46-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.577327 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2be3fde-71b1-4b34-9179-52ca2861cb46-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.577339 4706 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f2be3fde-71b1-4b34-9179-52ca2861cb46-client-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.578176 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-client-ca" (OuterVolumeSpecName: "client-ca") pod "5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d" (UID: "5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.578706 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d" (UID: "5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.579283 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-config" (OuterVolumeSpecName: "config") pod "5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d" (UID: "5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.580611 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d" (UID: "5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.581342 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-kube-api-access-x65c8" (OuterVolumeSpecName: "kube-api-access-x65c8") pod "5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d" (UID: "5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d"). InnerVolumeSpecName "kube-api-access-x65c8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.588709 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw"] Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.591613 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7948c9bcdd-plbvw"] Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.679048 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x65c8\" (UniqueName: \"kubernetes.io/projected/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-kube-api-access-x65c8\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.679119 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.679132 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.679147 4706 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.679201 4706 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d-client-ca\") on node \"crc\" DevicePath \"\"" Dec 06 
05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.898757 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-565f8b58b8-74k75"] Dec 06 05:27:15 crc kubenswrapper[4706]: I1206 05:27:15.901506 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-565f8b58b8-74k75"] Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.042568 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d" path="/var/lib/kubelet/pods/5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d/volumes" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.043521 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2be3fde-71b1-4b34-9179-52ca2861cb46" path="/var/lib/kubelet/pods/f2be3fde-71b1-4b34-9179-52ca2861cb46/volumes" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.657237 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d"] Dec 06 05:27:16 crc kubenswrapper[4706]: E1206 05:27:16.657785 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2be3fde-71b1-4b34-9179-52ca2861cb46" containerName="route-controller-manager" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.657796 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2be3fde-71b1-4b34-9179-52ca2861cb46" containerName="route-controller-manager" Dec 06 05:27:16 crc kubenswrapper[4706]: E1206 05:27:16.657807 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" containerName="registry-server" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.657813 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" containerName="registry-server" Dec 06 05:27:16 crc kubenswrapper[4706]: E1206 05:27:16.657829 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" containerName="extract-utilities" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.657837 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" containerName="extract-utilities" Dec 06 05:27:16 crc kubenswrapper[4706]: E1206 05:27:16.657848 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" containerName="registry-server" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.657856 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" containerName="registry-server" Dec 06 05:27:16 crc kubenswrapper[4706]: E1206 05:27:16.657872 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" containerName="extract-content" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.657879 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" containerName="extract-content" Dec 06 05:27:16 crc kubenswrapper[4706]: E1206 05:27:16.657888 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d" containerName="controller-manager" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.657894 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d" containerName="controller-manager" Dec 06 05:27:16 crc kubenswrapper[4706]: E1206 05:27:16.657902 4706 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" containerName="extract-utilities" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.657908 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" containerName="extract-utilities" Dec 06 05:27:16 crc kubenswrapper[4706]: E1206 05:27:16.657917 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" containerName="extract-content" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.657924 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" containerName="extract-content" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.658042 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2be3fde-71b1-4b34-9179-52ca2861cb46" containerName="route-controller-manager" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.658057 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f5b7e23-6e82-4bbe-a0b3-c4ab279acf6d" containerName="controller-manager" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.658066 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="52e328e7-19c9-4412-96f0-582cd5add7c5" containerName="registry-server" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.658092 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf5c1feb-f09b-41c2-9974-56538ccc281f" containerName="registry-server" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.658431 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.660633 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.661009 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.661454 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.661900 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.662288 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.665382 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.669665 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d"] Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.802654 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-client-ca\") pod \"route-controller-manager-8b979b485-gpv8d\" (UID: \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\") " 
pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.802741 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-config\") pod \"route-controller-manager-8b979b485-gpv8d\" (UID: \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\") " pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.802780 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-serving-cert\") pod \"route-controller-manager-8b979b485-gpv8d\" (UID: \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\") " pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.802903 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfstz\" (UniqueName: \"kubernetes.io/projected/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-kube-api-access-cfstz\") pod \"route-controller-manager-8b979b485-gpv8d\" (UID: \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\") " pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.904525 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-client-ca\") pod \"route-controller-manager-8b979b485-gpv8d\" (UID: \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\") " pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.904634 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-config\") pod \"route-controller-manager-8b979b485-gpv8d\" (UID: \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\") " pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.904684 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-serving-cert\") pod \"route-controller-manager-8b979b485-gpv8d\" (UID: \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\") " pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.904748 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfstz\" (UniqueName: \"kubernetes.io/projected/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-kube-api-access-cfstz\") pod \"route-controller-manager-8b979b485-gpv8d\" (UID: \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\") " pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.905893 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-client-ca\") pod \"route-controller-manager-8b979b485-gpv8d\" (UID: \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\") " 
pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.906221 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-config\") pod \"route-controller-manager-8b979b485-gpv8d\" (UID: \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\") " pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.913807 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-serving-cert\") pod \"route-controller-manager-8b979b485-gpv8d\" (UID: \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\") " pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.924725 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfstz\" (UniqueName: \"kubernetes.io/projected/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-kube-api-access-cfstz\") pod \"route-controller-manager-8b979b485-gpv8d\" (UID: \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\") " pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" Dec 06 05:27:16 crc kubenswrapper[4706]: I1206 05:27:16.977290 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.177497 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d"] Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.575899 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" event={"ID":"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c","Type":"ContainerStarted","Data":"2bec4c1c5ab4625aa97e4d927e62e884240faeac191750a8d0d3f67fd26da9f9"} Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.658614 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-866ddd4d9c-kbc88"] Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.659220 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.661427 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.661779 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.664323 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.665233 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.665386 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.670914 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.671366 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.678697 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-866ddd4d9c-kbc88"] Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.816001 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-proxy-ca-bundles\") pod \"controller-manager-866ddd4d9c-kbc88\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.816390 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-client-ca\") pod \"controller-manager-866ddd4d9c-kbc88\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.816424 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4skgn\" (UniqueName: \"kubernetes.io/projected/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-kube-api-access-4skgn\") pod \"controller-manager-866ddd4d9c-kbc88\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.816457 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-serving-cert\") pod \"controller-manager-866ddd4d9c-kbc88\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.816478 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-config\") pod \"controller-manager-866ddd4d9c-kbc88\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.917612 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-proxy-ca-bundles\") pod \"controller-manager-866ddd4d9c-kbc88\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.917678 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-client-ca\") pod \"controller-manager-866ddd4d9c-kbc88\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.917706 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4skgn\" (UniqueName: \"kubernetes.io/projected/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-kube-api-access-4skgn\") pod \"controller-manager-866ddd4d9c-kbc88\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.917736 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-serving-cert\") pod \"controller-manager-866ddd4d9c-kbc88\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.917759 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-config\") pod \"controller-manager-866ddd4d9c-kbc88\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.918875 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-proxy-ca-bundles\") pod \"controller-manager-866ddd4d9c-kbc88\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.919026 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-config\") pod \"controller-manager-866ddd4d9c-kbc88\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.919115 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-client-ca\") pod \"controller-manager-866ddd4d9c-kbc88\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" 
Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.925589 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-serving-cert\") pod \"controller-manager-866ddd4d9c-kbc88\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.940327 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4skgn\" (UniqueName: \"kubernetes.io/projected/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-kube-api-access-4skgn\") pod \"controller-manager-866ddd4d9c-kbc88\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:17 crc kubenswrapper[4706]: I1206 05:27:17.975207 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:18 crc kubenswrapper[4706]: I1206 05:27:18.442107 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-866ddd4d9c-kbc88"] Dec 06 05:27:18 crc kubenswrapper[4706]: W1206 05:27:18.447368 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaa42cb7e_eb9f_45e6_bc43_f78740ab1726.slice/crio-9f841f4799748b565eb58fd81264e24c71551af83028853a0e9e52e86131759e WatchSource:0}: Error finding container 9f841f4799748b565eb58fd81264e24c71551af83028853a0e9e52e86131759e: Status 404 returned error can't find the container with id 9f841f4799748b565eb58fd81264e24c71551af83028853a0e9e52e86131759e Dec 06 05:27:18 crc kubenswrapper[4706]: I1206 05:27:18.583279 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" event={"ID":"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c","Type":"ContainerStarted","Data":"511e56896728a6f926837256a8ba920e4016ec8ddf5646ac9df8407daeb15edf"} Dec 06 05:27:18 crc kubenswrapper[4706]: I1206 05:27:18.583629 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" Dec 06 05:27:18 crc kubenswrapper[4706]: I1206 05:27:18.584508 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" event={"ID":"aa42cb7e-eb9f-45e6-bc43-f78740ab1726","Type":"ContainerStarted","Data":"9f841f4799748b565eb58fd81264e24c71551af83028853a0e9e52e86131759e"} Dec 06 05:27:18 crc kubenswrapper[4706]: I1206 05:27:18.588906 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" Dec 06 05:27:18 crc kubenswrapper[4706]: I1206 05:27:18.602316 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" podStartSLOduration=9.602295537 podStartE2EDuration="9.602295537s" podCreationTimestamp="2025-12-06 05:27:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:27:18.599587406 +0000 UTC m=+460.927411360" watchObservedRunningTime="2025-12-06 05:27:18.602295537 +0000 UTC m=+460.930119481" Dec 06 05:27:19 crc kubenswrapper[4706]: I1206 
05:27:19.590765 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" event={"ID":"aa42cb7e-eb9f-45e6-bc43-f78740ab1726","Type":"ContainerStarted","Data":"310956616c06ac3ccd167a95cbb4280db097ab033e695a65e3d6d14c4f259bcf"} Dec 06 05:27:19 crc kubenswrapper[4706]: I1206 05:27:19.591218 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:19 crc kubenswrapper[4706]: I1206 05:27:19.595918 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:19 crc kubenswrapper[4706]: I1206 05:27:19.608494 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" podStartSLOduration=10.608470808 podStartE2EDuration="10.608470808s" podCreationTimestamp="2025-12-06 05:27:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:27:19.60782646 +0000 UTC m=+461.935650404" watchObservedRunningTime="2025-12-06 05:27:19.608470808 +0000 UTC m=+461.936294762" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.361967 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-rrslb"] Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.364032 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.376779 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-rrslb"] Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.514313 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cc8jg\" (UniqueName: \"kubernetes.io/projected/90ac55ea-69d7-4e0c-8c3e-510779641491-kube-api-access-cc8jg\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.514362 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/90ac55ea-69d7-4e0c-8c3e-510779641491-bound-sa-token\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.514384 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/90ac55ea-69d7-4e0c-8c3e-510779641491-registry-tls\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.514418 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/90ac55ea-69d7-4e0c-8c3e-510779641491-installation-pull-secrets\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.514551 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/90ac55ea-69d7-4e0c-8c3e-510779641491-trusted-ca\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.514596 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/90ac55ea-69d7-4e0c-8c3e-510779641491-registry-certificates\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.514639 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.514744 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/90ac55ea-69d7-4e0c-8c3e-510779641491-ca-trust-extracted\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.535378 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.615833 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cc8jg\" (UniqueName: \"kubernetes.io/projected/90ac55ea-69d7-4e0c-8c3e-510779641491-kube-api-access-cc8jg\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.615903 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/90ac55ea-69d7-4e0c-8c3e-510779641491-bound-sa-token\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.615934 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/90ac55ea-69d7-4e0c-8c3e-510779641491-registry-tls\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 
05:27:42.615987 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/90ac55ea-69d7-4e0c-8c3e-510779641491-installation-pull-secrets\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.616016 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/90ac55ea-69d7-4e0c-8c3e-510779641491-registry-certificates\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.616036 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/90ac55ea-69d7-4e0c-8c3e-510779641491-trusted-ca\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.616103 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/90ac55ea-69d7-4e0c-8c3e-510779641491-ca-trust-extracted\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.616713 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/90ac55ea-69d7-4e0c-8c3e-510779641491-ca-trust-extracted\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.617281 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/90ac55ea-69d7-4e0c-8c3e-510779641491-registry-certificates\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.619199 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/90ac55ea-69d7-4e0c-8c3e-510779641491-trusted-ca\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.621934 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/90ac55ea-69d7-4e0c-8c3e-510779641491-installation-pull-secrets\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.629266 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/90ac55ea-69d7-4e0c-8c3e-510779641491-registry-tls\") pod \"image-registry-66df7c8f76-rrslb\" 
(UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.632713 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cc8jg\" (UniqueName: \"kubernetes.io/projected/90ac55ea-69d7-4e0c-8c3e-510779641491-kube-api-access-cc8jg\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.635122 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/90ac55ea-69d7-4e0c-8c3e-510779641491-bound-sa-token\") pod \"image-registry-66df7c8f76-rrslb\" (UID: \"90ac55ea-69d7-4e0c-8c3e-510779641491\") " pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:42 crc kubenswrapper[4706]: I1206 05:27:42.686696 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:43 crc kubenswrapper[4706]: I1206 05:27:43.097602 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-rrslb"] Dec 06 05:27:43 crc kubenswrapper[4706]: I1206 05:27:43.746433 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" event={"ID":"90ac55ea-69d7-4e0c-8c3e-510779641491","Type":"ContainerStarted","Data":"3f2ffa865511433f30639f2d3e86e2fe7488278817815898d7c1f24cc6244cee"} Dec 06 05:27:43 crc kubenswrapper[4706]: I1206 05:27:43.746475 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" event={"ID":"90ac55ea-69d7-4e0c-8c3e-510779641491","Type":"ContainerStarted","Data":"bcc20438c4755ad134b219c53b06b6897578cf2289bc801710f39e29cd9d41da"} Dec 06 05:27:43 crc kubenswrapper[4706]: I1206 05:27:43.746570 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:27:43 crc kubenswrapper[4706]: I1206 05:27:43.769328 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" podStartSLOduration=1.769312635 podStartE2EDuration="1.769312635s" podCreationTimestamp="2025-12-06 05:27:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:27:43.767952409 +0000 UTC m=+486.095776393" watchObservedRunningTime="2025-12-06 05:27:43.769312635 +0000 UTC m=+486.097136579" Dec 06 05:27:45 crc kubenswrapper[4706]: I1206 05:27:45.116999 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-866ddd4d9c-kbc88"] Dec 06 05:27:45 crc kubenswrapper[4706]: I1206 05:27:45.117693 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" podUID="aa42cb7e-eb9f-45e6-bc43-f78740ab1726" containerName="controller-manager" containerID="cri-o://310956616c06ac3ccd167a95cbb4280db097ab033e695a65e3d6d14c4f259bcf" gracePeriod=30 Dec 06 05:27:45 crc kubenswrapper[4706]: I1206 05:27:45.139857 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d"] Dec 06 
05:27:45 crc kubenswrapper[4706]: I1206 05:27:45.140188 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" podUID="9f85a489-5b5f-43e2-8e22-6bed75ab5c6c" containerName="route-controller-manager" containerID="cri-o://511e56896728a6f926837256a8ba920e4016ec8ddf5646ac9df8407daeb15edf" gracePeriod=30 Dec 06 05:27:46 crc kubenswrapper[4706]: I1206 05:27:46.978467 4706 patch_prober.go:28] interesting pod/route-controller-manager-8b979b485-gpv8d container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.60:8443/healthz\": dial tcp 10.217.0.60:8443: connect: connection refused" start-of-body= Dec 06 05:27:46 crc kubenswrapper[4706]: I1206 05:27:46.978876 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" podUID="9f85a489-5b5f-43e2-8e22-6bed75ab5c6c" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.60:8443/healthz\": dial tcp 10.217.0.60:8443: connect: connection refused" Dec 06 05:27:47 crc kubenswrapper[4706]: I1206 05:27:47.766651 4706 generic.go:334] "Generic (PLEG): container finished" podID="9f85a489-5b5f-43e2-8e22-6bed75ab5c6c" containerID="511e56896728a6f926837256a8ba920e4016ec8ddf5646ac9df8407daeb15edf" exitCode=0 Dec 06 05:27:47 crc kubenswrapper[4706]: I1206 05:27:47.766734 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" event={"ID":"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c","Type":"ContainerDied","Data":"511e56896728a6f926837256a8ba920e4016ec8ddf5646ac9df8407daeb15edf"} Dec 06 05:27:47 crc kubenswrapper[4706]: I1206 05:27:47.768678 4706 generic.go:334] "Generic (PLEG): container finished" podID="aa42cb7e-eb9f-45e6-bc43-f78740ab1726" containerID="310956616c06ac3ccd167a95cbb4280db097ab033e695a65e3d6d14c4f259bcf" exitCode=0 Dec 06 05:27:47 crc kubenswrapper[4706]: I1206 05:27:47.768709 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" event={"ID":"aa42cb7e-eb9f-45e6-bc43-f78740ab1726","Type":"ContainerDied","Data":"310956616c06ac3ccd167a95cbb4280db097ab033e695a65e3d6d14c4f259bcf"} Dec 06 05:27:47 crc kubenswrapper[4706]: I1206 05:27:47.976910 4706 patch_prober.go:28] interesting pod/controller-manager-866ddd4d9c-kbc88 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused" start-of-body= Dec 06 05:27:47 crc kubenswrapper[4706]: I1206 05:27:47.976978 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" podUID="aa42cb7e-eb9f-45e6-bc43-f78740ab1726" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.61:8443/healthz\": dial tcp 10.217.0.61:8443: connect: connection refused" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.114628 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.125271 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.153756 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr"] Dec 06 05:27:49 crc kubenswrapper[4706]: E1206 05:27:49.154007 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9f85a489-5b5f-43e2-8e22-6bed75ab5c6c" containerName="route-controller-manager" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.154022 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="9f85a489-5b5f-43e2-8e22-6bed75ab5c6c" containerName="route-controller-manager" Dec 06 05:27:49 crc kubenswrapper[4706]: E1206 05:27:49.154037 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa42cb7e-eb9f-45e6-bc43-f78740ab1726" containerName="controller-manager" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.154066 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa42cb7e-eb9f-45e6-bc43-f78740ab1726" containerName="controller-manager" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.154240 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa42cb7e-eb9f-45e6-bc43-f78740ab1726" containerName="controller-manager" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.154258 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="9f85a489-5b5f-43e2-8e22-6bed75ab5c6c" containerName="route-controller-manager" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.154680 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.166449 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr"] Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.223073 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfstz\" (UniqueName: \"kubernetes.io/projected/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-kube-api-access-cfstz\") pod \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\" (UID: \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\") " Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.223136 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-config\") pod \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.223181 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-proxy-ca-bundles\") pod \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.223231 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-serving-cert\") pod \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\" (UID: \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\") " Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.223250 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-config\") pod \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\" (UID: \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\") " Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.223263 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-serving-cert\") pod \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.223665 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-client-ca\") pod \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.223694 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4skgn\" (UniqueName: \"kubernetes.io/projected/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-kube-api-access-4skgn\") pod \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\" (UID: \"aa42cb7e-eb9f-45e6-bc43-f78740ab1726\") " Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.223761 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-client-ca\") pod \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\" (UID: \"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c\") " Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.223916 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8b82\" (UniqueName: \"kubernetes.io/projected/ec148dc1-882a-465f-97a8-599a95131d6e-kube-api-access-v8b82\") pod \"route-controller-manager-67dc87bc48-txnxr\" (UID: \"ec148dc1-882a-465f-97a8-599a95131d6e\") " pod="openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.223950 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec148dc1-882a-465f-97a8-599a95131d6e-config\") pod \"route-controller-manager-67dc87bc48-txnxr\" (UID: \"ec148dc1-882a-465f-97a8-599a95131d6e\") " pod="openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.224110 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ec148dc1-882a-465f-97a8-599a95131d6e-client-ca\") pod \"route-controller-manager-67dc87bc48-txnxr\" (UID: \"ec148dc1-882a-465f-97a8-599a95131d6e\") " pod="openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.226371 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-client-ca" (OuterVolumeSpecName: "client-ca") pod "aa42cb7e-eb9f-45e6-bc43-f78740ab1726" (UID: "aa42cb7e-eb9f-45e6-bc43-f78740ab1726"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.226606 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ec148dc1-882a-465f-97a8-599a95131d6e-serving-cert\") pod \"route-controller-manager-67dc87bc48-txnxr\" (UID: \"ec148dc1-882a-465f-97a8-599a95131d6e\") " pod="openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.226745 4706 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-client-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.226872 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-client-ca" (OuterVolumeSpecName: "client-ca") pod "9f85a489-5b5f-43e2-8e22-6bed75ab5c6c" (UID: "9f85a489-5b5f-43e2-8e22-6bed75ab5c6c"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.227115 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-config" (OuterVolumeSpecName: "config") pod "9f85a489-5b5f-43e2-8e22-6bed75ab5c6c" (UID: "9f85a489-5b5f-43e2-8e22-6bed75ab5c6c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.227098 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-config" (OuterVolumeSpecName: "config") pod "aa42cb7e-eb9f-45e6-bc43-f78740ab1726" (UID: "aa42cb7e-eb9f-45e6-bc43-f78740ab1726"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.228651 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "aa42cb7e-eb9f-45e6-bc43-f78740ab1726" (UID: "aa42cb7e-eb9f-45e6-bc43-f78740ab1726"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.236738 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9f85a489-5b5f-43e2-8e22-6bed75ab5c6c" (UID: "9f85a489-5b5f-43e2-8e22-6bed75ab5c6c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.236764 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-kube-api-access-cfstz" (OuterVolumeSpecName: "kube-api-access-cfstz") pod "9f85a489-5b5f-43e2-8e22-6bed75ab5c6c" (UID: "9f85a489-5b5f-43e2-8e22-6bed75ab5c6c"). InnerVolumeSpecName "kube-api-access-cfstz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.237003 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "aa42cb7e-eb9f-45e6-bc43-f78740ab1726" (UID: "aa42cb7e-eb9f-45e6-bc43-f78740ab1726"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.242519 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-kube-api-access-4skgn" (OuterVolumeSpecName: "kube-api-access-4skgn") pod "aa42cb7e-eb9f-45e6-bc43-f78740ab1726" (UID: "aa42cb7e-eb9f-45e6-bc43-f78740ab1726"). InnerVolumeSpecName "kube-api-access-4skgn". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.327690 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ec148dc1-882a-465f-97a8-599a95131d6e-serving-cert\") pod \"route-controller-manager-67dc87bc48-txnxr\" (UID: \"ec148dc1-882a-465f-97a8-599a95131d6e\") " pod="openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.328139 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8b82\" (UniqueName: \"kubernetes.io/projected/ec148dc1-882a-465f-97a8-599a95131d6e-kube-api-access-v8b82\") pod \"route-controller-manager-67dc87bc48-txnxr\" (UID: \"ec148dc1-882a-465f-97a8-599a95131d6e\") " pod="openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.328168 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec148dc1-882a-465f-97a8-599a95131d6e-config\") pod \"route-controller-manager-67dc87bc48-txnxr\" (UID: \"ec148dc1-882a-465f-97a8-599a95131d6e\") " pod="openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.328207 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ec148dc1-882a-465f-97a8-599a95131d6e-client-ca\") pod \"route-controller-manager-67dc87bc48-txnxr\" (UID: \"ec148dc1-882a-465f-97a8-599a95131d6e\") " pod="openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.328244 4706 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.328255 4706 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.328265 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.328276 4706 
reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.328287 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4skgn\" (UniqueName: \"kubernetes.io/projected/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-kube-api-access-4skgn\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.328296 4706 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-client-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.328310 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfstz\" (UniqueName: \"kubernetes.io/projected/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c-kube-api-access-cfstz\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.328321 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/aa42cb7e-eb9f-45e6-bc43-f78740ab1726-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.329223 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ec148dc1-882a-465f-97a8-599a95131d6e-client-ca\") pod \"route-controller-manager-67dc87bc48-txnxr\" (UID: \"ec148dc1-882a-465f-97a8-599a95131d6e\") " pod="openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.330987 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ec148dc1-882a-465f-97a8-599a95131d6e-config\") pod \"route-controller-manager-67dc87bc48-txnxr\" (UID: \"ec148dc1-882a-465f-97a8-599a95131d6e\") " pod="openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.342816 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ec148dc1-882a-465f-97a8-599a95131d6e-serving-cert\") pod \"route-controller-manager-67dc87bc48-txnxr\" (UID: \"ec148dc1-882a-465f-97a8-599a95131d6e\") " pod="openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.345590 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8b82\" (UniqueName: \"kubernetes.io/projected/ec148dc1-882a-465f-97a8-599a95131d6e-kube-api-access-v8b82\") pod \"route-controller-manager-67dc87bc48-txnxr\" (UID: \"ec148dc1-882a-465f-97a8-599a95131d6e\") " pod="openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.474491 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr" Dec 06 05:27:49 crc kubenswrapper[4706]: I1206 05:27:49.734162 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr"] Dec 06 05:27:50 crc kubenswrapper[4706]: I1206 05:27:50.888340 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr" event={"ID":"ec148dc1-882a-465f-97a8-599a95131d6e","Type":"ContainerStarted","Data":"70af349e5122609a0802aab0fc1d8a5642481dd4dce49a5d9a357b0d765c07da"} Dec 06 05:27:50 crc kubenswrapper[4706]: I1206 05:27:50.892874 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" event={"ID":"9f85a489-5b5f-43e2-8e22-6bed75ab5c6c","Type":"ContainerDied","Data":"2bec4c1c5ab4625aa97e4d927e62e884240faeac191750a8d0d3f67fd26da9f9"} Dec 06 05:27:50 crc kubenswrapper[4706]: I1206 05:27:50.892922 4706 scope.go:117] "RemoveContainer" containerID="511e56896728a6f926837256a8ba920e4016ec8ddf5646ac9df8407daeb15edf" Dec 06 05:27:50 crc kubenswrapper[4706]: I1206 05:27:50.892937 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d" Dec 06 05:27:50 crc kubenswrapper[4706]: I1206 05:27:50.911140 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" Dec 06 05:27:50 crc kubenswrapper[4706]: I1206 05:27:50.911127 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-866ddd4d9c-kbc88" event={"ID":"aa42cb7e-eb9f-45e6-bc43-f78740ab1726","Type":"ContainerDied","Data":"9f841f4799748b565eb58fd81264e24c71551af83028853a0e9e52e86131759e"} Dec 06 05:27:50 crc kubenswrapper[4706]: I1206 05:27:50.928420 4706 scope.go:117] "RemoveContainer" containerID="310956616c06ac3ccd167a95cbb4280db097ab033e695a65e3d6d14c4f259bcf" Dec 06 05:27:50 crc kubenswrapper[4706]: I1206 05:27:50.932961 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-866ddd4d9c-kbc88"] Dec 06 05:27:50 crc kubenswrapper[4706]: I1206 05:27:50.934195 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-866ddd4d9c-kbc88"] Dec 06 05:27:50 crc kubenswrapper[4706]: I1206 05:27:50.950977 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d"] Dec 06 05:27:50 crc kubenswrapper[4706]: I1206 05:27:50.954778 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-8b979b485-gpv8d"] Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.697683 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-8485f769cb-t8mxj"] Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.698481 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.702162 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.702710 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.703022 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.703300 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.703791 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.704410 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.712697 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.728303 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8485f769cb-t8mxj"] Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.759732 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-488pc\" (UniqueName: \"kubernetes.io/projected/e81df474-cf9e-474b-9cd3-27c5b3b930e1-kube-api-access-488pc\") pod \"controller-manager-8485f769cb-t8mxj\" (UID: \"e81df474-cf9e-474b-9cd3-27c5b3b930e1\") " pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.759803 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e81df474-cf9e-474b-9cd3-27c5b3b930e1-config\") pod \"controller-manager-8485f769cb-t8mxj\" (UID: \"e81df474-cf9e-474b-9cd3-27c5b3b930e1\") " pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.759843 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e81df474-cf9e-474b-9cd3-27c5b3b930e1-client-ca\") pod \"controller-manager-8485f769cb-t8mxj\" (UID: \"e81df474-cf9e-474b-9cd3-27c5b3b930e1\") " pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.759880 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e81df474-cf9e-474b-9cd3-27c5b3b930e1-proxy-ca-bundles\") pod \"controller-manager-8485f769cb-t8mxj\" (UID: \"e81df474-cf9e-474b-9cd3-27c5b3b930e1\") " pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.759915 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/e81df474-cf9e-474b-9cd3-27c5b3b930e1-serving-cert\") pod \"controller-manager-8485f769cb-t8mxj\" (UID: \"e81df474-cf9e-474b-9cd3-27c5b3b930e1\") " pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.861402 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-488pc\" (UniqueName: \"kubernetes.io/projected/e81df474-cf9e-474b-9cd3-27c5b3b930e1-kube-api-access-488pc\") pod \"controller-manager-8485f769cb-t8mxj\" (UID: \"e81df474-cf9e-474b-9cd3-27c5b3b930e1\") " pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.861465 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e81df474-cf9e-474b-9cd3-27c5b3b930e1-config\") pod \"controller-manager-8485f769cb-t8mxj\" (UID: \"e81df474-cf9e-474b-9cd3-27c5b3b930e1\") " pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.861514 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e81df474-cf9e-474b-9cd3-27c5b3b930e1-client-ca\") pod \"controller-manager-8485f769cb-t8mxj\" (UID: \"e81df474-cf9e-474b-9cd3-27c5b3b930e1\") " pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.861555 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e81df474-cf9e-474b-9cd3-27c5b3b930e1-proxy-ca-bundles\") pod \"controller-manager-8485f769cb-t8mxj\" (UID: \"e81df474-cf9e-474b-9cd3-27c5b3b930e1\") " pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.861610 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e81df474-cf9e-474b-9cd3-27c5b3b930e1-serving-cert\") pod \"controller-manager-8485f769cb-t8mxj\" (UID: \"e81df474-cf9e-474b-9cd3-27c5b3b930e1\") " pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.862903 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/e81df474-cf9e-474b-9cd3-27c5b3b930e1-client-ca\") pod \"controller-manager-8485f769cb-t8mxj\" (UID: \"e81df474-cf9e-474b-9cd3-27c5b3b930e1\") " pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.863252 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e81df474-cf9e-474b-9cd3-27c5b3b930e1-proxy-ca-bundles\") pod \"controller-manager-8485f769cb-t8mxj\" (UID: \"e81df474-cf9e-474b-9cd3-27c5b3b930e1\") " pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.863858 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e81df474-cf9e-474b-9cd3-27c5b3b930e1-config\") pod \"controller-manager-8485f769cb-t8mxj\" (UID: \"e81df474-cf9e-474b-9cd3-27c5b3b930e1\") " 
pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.872952 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e81df474-cf9e-474b-9cd3-27c5b3b930e1-serving-cert\") pod \"controller-manager-8485f769cb-t8mxj\" (UID: \"e81df474-cf9e-474b-9cd3-27c5b3b930e1\") " pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" Dec 06 05:27:51 crc kubenswrapper[4706]: I1206 05:27:51.877229 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-488pc\" (UniqueName: \"kubernetes.io/projected/e81df474-cf9e-474b-9cd3-27c5b3b930e1-kube-api-access-488pc\") pod \"controller-manager-8485f769cb-t8mxj\" (UID: \"e81df474-cf9e-474b-9cd3-27c5b3b930e1\") " pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" Dec 06 05:27:52 crc kubenswrapper[4706]: I1206 05:27:52.019492 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" Dec 06 05:27:52 crc kubenswrapper[4706]: I1206 05:27:52.043524 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9f85a489-5b5f-43e2-8e22-6bed75ab5c6c" path="/var/lib/kubelet/pods/9f85a489-5b5f-43e2-8e22-6bed75ab5c6c/volumes" Dec 06 05:27:52 crc kubenswrapper[4706]: I1206 05:27:52.044352 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa42cb7e-eb9f-45e6-bc43-f78740ab1726" path="/var/lib/kubelet/pods/aa42cb7e-eb9f-45e6-bc43-f78740ab1726/volumes" Dec 06 05:27:52 crc kubenswrapper[4706]: I1206 05:27:52.471954 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8485f769cb-t8mxj"] Dec 06 05:27:52 crc kubenswrapper[4706]: I1206 05:27:52.936654 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr" event={"ID":"ec148dc1-882a-465f-97a8-599a95131d6e","Type":"ContainerStarted","Data":"cf35ef538acb2a79b09d1727cd62c919ee00b3c80927d9ae8ae4e6968ab7a320"} Dec 06 05:27:52 crc kubenswrapper[4706]: I1206 05:27:52.938228 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" event={"ID":"e81df474-cf9e-474b-9cd3-27c5b3b930e1","Type":"ContainerStarted","Data":"b9c27a73c0b13d2c97592e2417ddb1eafe40ced1ba8bfe1d7e7c0e89e71cc636"} Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.007678 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j5lbl"] Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.008496 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-j5lbl" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" containerName="registry-server" containerID="cri-o://26d6a521b2dc1b5558f348b152c395fab61190bb0ba46667df845db0b0645aba" gracePeriod=30 Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.026195 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-98kq2"] Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.026555 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-98kq2" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" containerName="registry-server" 
containerID="cri-o://92d79e0671426b72ca23e1771655166757ed0b3fd2ef3489f8b34cc1c211998c" gracePeriod=30 Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.039179 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-xptzp"] Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.044470 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" podUID="e9405376-0114-4bee-b245-f17b30f2594a" containerName="marketplace-operator" containerID="cri-o://ba70467ae38738266184ad8e81351f9df2124d6019a72d305a83293933d793c6" gracePeriod=30 Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.062914 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wx94f"] Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.063341 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wx94f" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" containerName="registry-server" containerID="cri-o://317775150df92de25f45a4135dbf96e2c76ac1d007b9491475c05d59a00cd055" gracePeriod=30 Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.073993 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9t9kd"] Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.075033 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-9t9kd" Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.088453 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j4frb"] Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.088725 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-j4frb" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" containerName="registry-server" containerID="cri-o://e2ad0d4599c20f803d7d4b64bccc309fbc3cbb9b90088169460a2106e2d0a5dd" gracePeriod=30 Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.095553 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9t9kd"] Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.204069 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dlwl\" (UniqueName: \"kubernetes.io/projected/03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d-kube-api-access-4dlwl\") pod \"marketplace-operator-79b997595-9t9kd\" (UID: \"03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d\") " pod="openshift-marketplace/marketplace-operator-79b997595-9t9kd" Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.204155 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9t9kd\" (UID: \"03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d\") " pod="openshift-marketplace/marketplace-operator-79b997595-9t9kd" Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.204197 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d-marketplace-operator-metrics\") pod 
\"marketplace-operator-79b997595-9t9kd\" (UID: \"03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d\") " pod="openshift-marketplace/marketplace-operator-79b997595-9t9kd" Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.305930 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dlwl\" (UniqueName: \"kubernetes.io/projected/03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d-kube-api-access-4dlwl\") pod \"marketplace-operator-79b997595-9t9kd\" (UID: \"03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d\") " pod="openshift-marketplace/marketplace-operator-79b997595-9t9kd" Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.305999 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9t9kd\" (UID: \"03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d\") " pod="openshift-marketplace/marketplace-operator-79b997595-9t9kd" Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.306029 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9t9kd\" (UID: \"03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d\") " pod="openshift-marketplace/marketplace-operator-79b997595-9t9kd" Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.307663 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-9t9kd\" (UID: \"03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d\") " pod="openshift-marketplace/marketplace-operator-79b997595-9t9kd" Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.312591 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-9t9kd\" (UID: \"03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d\") " pod="openshift-marketplace/marketplace-operator-79b997595-9t9kd" Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.320751 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dlwl\" (UniqueName: \"kubernetes.io/projected/03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d-kube-api-access-4dlwl\") pod \"marketplace-operator-79b997595-9t9kd\" (UID: \"03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d\") " pod="openshift-marketplace/marketplace-operator-79b997595-9t9kd" Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.398374 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-9t9kd" Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.812024 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-9t9kd"] Dec 06 05:27:54 crc kubenswrapper[4706]: W1206 05:27:54.827338 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod03d87bf8_3c4d_4399_b7e9_dafa3bb98b4d.slice/crio-ebe10749bcaf3ef0b2fd8d820063a7958a71bcb32ec7a340002a0f2c9c1edff9 WatchSource:0}: Error finding container ebe10749bcaf3ef0b2fd8d820063a7958a71bcb32ec7a340002a0f2c9c1edff9: Status 404 returned error can't find the container with id ebe10749bcaf3ef0b2fd8d820063a7958a71bcb32ec7a340002a0f2c9c1edff9 Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.932778 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j5lbl" Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.968955 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" event={"ID":"e81df474-cf9e-474b-9cd3-27c5b3b930e1","Type":"ContainerStarted","Data":"c45bd29e695fbf0ad21c25348ced2a9ab739ef22acb94d6725b1351e9981843a"} Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.969322 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.972906 4706 generic.go:334] "Generic (PLEG): container finished" podID="2d21abc6-d736-47df-8eac-4dee0691a92c" containerID="e2ad0d4599c20f803d7d4b64bccc309fbc3cbb9b90088169460a2106e2d0a5dd" exitCode=0 Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.972953 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j4frb" event={"ID":"2d21abc6-d736-47df-8eac-4dee0691a92c","Type":"ContainerDied","Data":"e2ad0d4599c20f803d7d4b64bccc309fbc3cbb9b90088169460a2106e2d0a5dd"} Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.976292 4706 generic.go:334] "Generic (PLEG): container finished" podID="404f2b83-1030-4b10-b1cf-c7db67aae01f" containerID="317775150df92de25f45a4135dbf96e2c76ac1d007b9491475c05d59a00cd055" exitCode=0 Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.976357 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wx94f" event={"ID":"404f2b83-1030-4b10-b1cf-c7db67aae01f","Type":"ContainerDied","Data":"317775150df92de25f45a4135dbf96e2c76ac1d007b9491475c05d59a00cd055"} Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.981745 4706 generic.go:334] "Generic (PLEG): container finished" podID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" containerID="26d6a521b2dc1b5558f348b152c395fab61190bb0ba46667df845db0b0645aba" exitCode=0 Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.981813 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j5lbl" event={"ID":"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202","Type":"ContainerDied","Data":"26d6a521b2dc1b5558f348b152c395fab61190bb0ba46667df845db0b0645aba"} Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.981844 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j5lbl" 
event={"ID":"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202","Type":"ContainerDied","Data":"9de2f95bfc2b2a567aaa56a74eb7dcf1e157287d8d82bf197c8c930199456ca8"} Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.981865 4706 scope.go:117] "RemoveContainer" containerID="26d6a521b2dc1b5558f348b152c395fab61190bb0ba46667df845db0b0645aba" Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.982004 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j5lbl" Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.984834 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.995771 4706 generic.go:334] "Generic (PLEG): container finished" podID="e9405376-0114-4bee-b245-f17b30f2594a" containerID="ba70467ae38738266184ad8e81351f9df2124d6019a72d305a83293933d793c6" exitCode=0 Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.995862 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" event={"ID":"e9405376-0114-4bee-b245-f17b30f2594a","Type":"ContainerDied","Data":"ba70467ae38738266184ad8e81351f9df2124d6019a72d305a83293933d793c6"} Dec 06 05:27:54 crc kubenswrapper[4706]: I1206 05:27:54.998729 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-8485f769cb-t8mxj" podStartSLOduration=9.998710856 podStartE2EDuration="9.998710856s" podCreationTimestamp="2025-12-06 05:27:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:27:54.995990393 +0000 UTC m=+497.323814337" watchObservedRunningTime="2025-12-06 05:27:54.998710856 +0000 UTC m=+497.326534800" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.015062 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e6f7aa9-bbf3-4160-9eb8-e7d54c354202-catalog-content\") pod \"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202\" (UID: \"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202\") " Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.015161 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wttgm\" (UniqueName: \"kubernetes.io/projected/5e6f7aa9-bbf3-4160-9eb8-e7d54c354202-kube-api-access-wttgm\") pod \"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202\" (UID: \"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202\") " Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.015256 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e6f7aa9-bbf3-4160-9eb8-e7d54c354202-utilities\") pod \"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202\" (UID: \"5e6f7aa9-bbf3-4160-9eb8-e7d54c354202\") " Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.019155 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e6f7aa9-bbf3-4160-9eb8-e7d54c354202-utilities" (OuterVolumeSpecName: "utilities") pod "5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" (UID: "5e6f7aa9-bbf3-4160-9eb8-e7d54c354202"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.019367 4706 generic.go:334] "Generic (PLEG): container finished" podID="2985a55d-3af2-4dd6-adde-7714459e08c3" containerID="92d79e0671426b72ca23e1771655166757ed0b3fd2ef3489f8b34cc1c211998c" exitCode=0 Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.019525 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-98kq2" event={"ID":"2985a55d-3af2-4dd6-adde-7714459e08c3","Type":"ContainerDied","Data":"92d79e0671426b72ca23e1771655166757ed0b3fd2ef3489f8b34cc1c211998c"} Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.027830 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9t9kd" event={"ID":"03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d","Type":"ContainerStarted","Data":"ebe10749bcaf3ef0b2fd8d820063a7958a71bcb32ec7a340002a0f2c9c1edff9"} Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.028166 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.035651 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e6f7aa9-bbf3-4160-9eb8-e7d54c354202-kube-api-access-wttgm" (OuterVolumeSpecName: "kube-api-access-wttgm") pod "5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" (UID: "5e6f7aa9-bbf3-4160-9eb8-e7d54c354202"). InnerVolumeSpecName "kube-api-access-wttgm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.036729 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.046530 4706 scope.go:117] "RemoveContainer" containerID="2f5be58ab034da25a5147e99c9c473ae721aaeb2eef91abe3ec0134ece8468a8" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.049618 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-67dc87bc48-txnxr" podStartSLOduration=10.049603319 podStartE2EDuration="10.049603319s" podCreationTimestamp="2025-12-06 05:27:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:27:55.049006813 +0000 UTC m=+497.376830777" watchObservedRunningTime="2025-12-06 05:27:55.049603319 +0000 UTC m=+497.377427263" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.078977 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e6f7aa9-bbf3-4160-9eb8-e7d54c354202-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" (UID: "5e6f7aa9-bbf3-4160-9eb8-e7d54c354202"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.087807 4706 scope.go:117] "RemoveContainer" containerID="ab69b8f1d874f20f084f8e81f7b8328cd9912447560c5c0bde5dfd6b3d6435d4" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.112862 4706 scope.go:117] "RemoveContainer" containerID="26d6a521b2dc1b5558f348b152c395fab61190bb0ba46667df845db0b0645aba" Dec 06 05:27:55 crc kubenswrapper[4706]: E1206 05:27:55.113526 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26d6a521b2dc1b5558f348b152c395fab61190bb0ba46667df845db0b0645aba\": container with ID starting with 26d6a521b2dc1b5558f348b152c395fab61190bb0ba46667df845db0b0645aba not found: ID does not exist" containerID="26d6a521b2dc1b5558f348b152c395fab61190bb0ba46667df845db0b0645aba" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.113650 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26d6a521b2dc1b5558f348b152c395fab61190bb0ba46667df845db0b0645aba"} err="failed to get container status \"26d6a521b2dc1b5558f348b152c395fab61190bb0ba46667df845db0b0645aba\": rpc error: code = NotFound desc = could not find container \"26d6a521b2dc1b5558f348b152c395fab61190bb0ba46667df845db0b0645aba\": container with ID starting with 26d6a521b2dc1b5558f348b152c395fab61190bb0ba46667df845db0b0645aba not found: ID does not exist" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.113750 4706 scope.go:117] "RemoveContainer" containerID="2f5be58ab034da25a5147e99c9c473ae721aaeb2eef91abe3ec0134ece8468a8" Dec 06 05:27:55 crc kubenswrapper[4706]: E1206 05:27:55.114521 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f5be58ab034da25a5147e99c9c473ae721aaeb2eef91abe3ec0134ece8468a8\": container with ID starting with 2f5be58ab034da25a5147e99c9c473ae721aaeb2eef91abe3ec0134ece8468a8 not found: ID does not exist" containerID="2f5be58ab034da25a5147e99c9c473ae721aaeb2eef91abe3ec0134ece8468a8" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.114578 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f5be58ab034da25a5147e99c9c473ae721aaeb2eef91abe3ec0134ece8468a8"} err="failed to get container status \"2f5be58ab034da25a5147e99c9c473ae721aaeb2eef91abe3ec0134ece8468a8\": rpc error: code = NotFound desc = could not find container \"2f5be58ab034da25a5147e99c9c473ae721aaeb2eef91abe3ec0134ece8468a8\": container with ID starting with 2f5be58ab034da25a5147e99c9c473ae721aaeb2eef91abe3ec0134ece8468a8 not found: ID does not exist" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.114614 4706 scope.go:117] "RemoveContainer" containerID="ab69b8f1d874f20f084f8e81f7b8328cd9912447560c5c0bde5dfd6b3d6435d4" Dec 06 05:27:55 crc kubenswrapper[4706]: E1206 05:27:55.115214 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab69b8f1d874f20f084f8e81f7b8328cd9912447560c5c0bde5dfd6b3d6435d4\": container with ID starting with ab69b8f1d874f20f084f8e81f7b8328cd9912447560c5c0bde5dfd6b3d6435d4 not found: ID does not exist" containerID="ab69b8f1d874f20f084f8e81f7b8328cd9912447560c5c0bde5dfd6b3d6435d4" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.115297 4706 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"ab69b8f1d874f20f084f8e81f7b8328cd9912447560c5c0bde5dfd6b3d6435d4"} err="failed to get container status \"ab69b8f1d874f20f084f8e81f7b8328cd9912447560c5c0bde5dfd6b3d6435d4\": rpc error: code = NotFound desc = could not find container \"ab69b8f1d874f20f084f8e81f7b8328cd9912447560c5c0bde5dfd6b3d6435d4\": container with ID starting with ab69b8f1d874f20f084f8e81f7b8328cd9912447560c5c0bde5dfd6b3d6435d4 not found: ID does not exist" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.115363 4706 scope.go:117] "RemoveContainer" containerID="128a6e82768f0cec748a1a7c63155a2de1ce3d17db05d8e9bc6454ebf5d1e6c3" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.119026 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e6f7aa9-bbf3-4160-9eb8-e7d54c354202-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.119150 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wttgm\" (UniqueName: \"kubernetes.io/projected/5e6f7aa9-bbf3-4160-9eb8-e7d54c354202-kube-api-access-wttgm\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.119228 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e6f7aa9-bbf3-4160-9eb8-e7d54c354202-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.244132 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-98kq2" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.321390 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2985a55d-3af2-4dd6-adde-7714459e08c3-utilities\") pod \"2985a55d-3af2-4dd6-adde-7714459e08c3\" (UID: \"2985a55d-3af2-4dd6-adde-7714459e08c3\") " Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.321453 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srtrx\" (UniqueName: \"kubernetes.io/projected/2985a55d-3af2-4dd6-adde-7714459e08c3-kube-api-access-srtrx\") pod \"2985a55d-3af2-4dd6-adde-7714459e08c3\" (UID: \"2985a55d-3af2-4dd6-adde-7714459e08c3\") " Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.321510 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2985a55d-3af2-4dd6-adde-7714459e08c3-catalog-content\") pod \"2985a55d-3af2-4dd6-adde-7714459e08c3\" (UID: \"2985a55d-3af2-4dd6-adde-7714459e08c3\") " Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.322649 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2985a55d-3af2-4dd6-adde-7714459e08c3-utilities" (OuterVolumeSpecName: "utilities") pod "2985a55d-3af2-4dd6-adde-7714459e08c3" (UID: "2985a55d-3af2-4dd6-adde-7714459e08c3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.328481 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2985a55d-3af2-4dd6-adde-7714459e08c3-kube-api-access-srtrx" (OuterVolumeSpecName: "kube-api-access-srtrx") pod "2985a55d-3af2-4dd6-adde-7714459e08c3" (UID: "2985a55d-3af2-4dd6-adde-7714459e08c3"). InnerVolumeSpecName "kube-api-access-srtrx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.357538 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wx94f" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.362153 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j5lbl"] Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.369010 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-j5lbl"] Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.385795 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j4frb" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.387634 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.422343 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/404f2b83-1030-4b10-b1cf-c7db67aae01f-utilities\") pod \"404f2b83-1030-4b10-b1cf-c7db67aae01f\" (UID: \"404f2b83-1030-4b10-b1cf-c7db67aae01f\") " Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.422424 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44ghq\" (UniqueName: \"kubernetes.io/projected/404f2b83-1030-4b10-b1cf-c7db67aae01f-kube-api-access-44ghq\") pod \"404f2b83-1030-4b10-b1cf-c7db67aae01f\" (UID: \"404f2b83-1030-4b10-b1cf-c7db67aae01f\") " Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.422518 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/404f2b83-1030-4b10-b1cf-c7db67aae01f-catalog-content\") pod \"404f2b83-1030-4b10-b1cf-c7db67aae01f\" (UID: \"404f2b83-1030-4b10-b1cf-c7db67aae01f\") " Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.422717 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2985a55d-3af2-4dd6-adde-7714459e08c3-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.422733 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srtrx\" (UniqueName: \"kubernetes.io/projected/2985a55d-3af2-4dd6-adde-7714459e08c3-kube-api-access-srtrx\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.423693 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/404f2b83-1030-4b10-b1cf-c7db67aae01f-utilities" (OuterVolumeSpecName: "utilities") pod "404f2b83-1030-4b10-b1cf-c7db67aae01f" (UID: "404f2b83-1030-4b10-b1cf-c7db67aae01f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.426253 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/404f2b83-1030-4b10-b1cf-c7db67aae01f-kube-api-access-44ghq" (OuterVolumeSpecName: "kube-api-access-44ghq") pod "404f2b83-1030-4b10-b1cf-c7db67aae01f" (UID: "404f2b83-1030-4b10-b1cf-c7db67aae01f"). InnerVolumeSpecName "kube-api-access-44ghq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.441125 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2985a55d-3af2-4dd6-adde-7714459e08c3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2985a55d-3af2-4dd6-adde-7714459e08c3" (UID: "2985a55d-3af2-4dd6-adde-7714459e08c3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.456865 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/404f2b83-1030-4b10-b1cf-c7db67aae01f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "404f2b83-1030-4b10-b1cf-c7db67aae01f" (UID: "404f2b83-1030-4b10-b1cf-c7db67aae01f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.523303 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d21abc6-d736-47df-8eac-4dee0691a92c-utilities\") pod \"2d21abc6-d736-47df-8eac-4dee0691a92c\" (UID: \"2d21abc6-d736-47df-8eac-4dee0691a92c\") " Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.523399 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmt24\" (UniqueName: \"kubernetes.io/projected/e9405376-0114-4bee-b245-f17b30f2594a-kube-api-access-fmt24\") pod \"e9405376-0114-4bee-b245-f17b30f2594a\" (UID: \"e9405376-0114-4bee-b245-f17b30f2594a\") " Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.523431 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e9405376-0114-4bee-b245-f17b30f2594a-marketplace-operator-metrics\") pod \"e9405376-0114-4bee-b245-f17b30f2594a\" (UID: \"e9405376-0114-4bee-b245-f17b30f2594a\") " Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.523460 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tkt56\" (UniqueName: \"kubernetes.io/projected/2d21abc6-d736-47df-8eac-4dee0691a92c-kube-api-access-tkt56\") pod \"2d21abc6-d736-47df-8eac-4dee0691a92c\" (UID: \"2d21abc6-d736-47df-8eac-4dee0691a92c\") " Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.523504 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d21abc6-d736-47df-8eac-4dee0691a92c-catalog-content\") pod \"2d21abc6-d736-47df-8eac-4dee0691a92c\" (UID: \"2d21abc6-d736-47df-8eac-4dee0691a92c\") " Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.523530 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e9405376-0114-4bee-b245-f17b30f2594a-marketplace-trusted-ca\") pod \"e9405376-0114-4bee-b245-f17b30f2594a\" (UID: \"e9405376-0114-4bee-b245-f17b30f2594a\") " Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.523742 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/404f2b83-1030-4b10-b1cf-c7db67aae01f-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.523754 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44ghq\" (UniqueName: 
\"kubernetes.io/projected/404f2b83-1030-4b10-b1cf-c7db67aae01f-kube-api-access-44ghq\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.523764 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2985a55d-3af2-4dd6-adde-7714459e08c3-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.523772 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/404f2b83-1030-4b10-b1cf-c7db67aae01f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.524426 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d21abc6-d736-47df-8eac-4dee0691a92c-utilities" (OuterVolumeSpecName: "utilities") pod "2d21abc6-d736-47df-8eac-4dee0691a92c" (UID: "2d21abc6-d736-47df-8eac-4dee0691a92c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.524518 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e9405376-0114-4bee-b245-f17b30f2594a-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "e9405376-0114-4bee-b245-f17b30f2594a" (UID: "e9405376-0114-4bee-b245-f17b30f2594a"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.527589 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d21abc6-d736-47df-8eac-4dee0691a92c-kube-api-access-tkt56" (OuterVolumeSpecName: "kube-api-access-tkt56") pod "2d21abc6-d736-47df-8eac-4dee0691a92c" (UID: "2d21abc6-d736-47df-8eac-4dee0691a92c"). InnerVolumeSpecName "kube-api-access-tkt56". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.530602 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e9405376-0114-4bee-b245-f17b30f2594a-kube-api-access-fmt24" (OuterVolumeSpecName: "kube-api-access-fmt24") pod "e9405376-0114-4bee-b245-f17b30f2594a" (UID: "e9405376-0114-4bee-b245-f17b30f2594a"). InnerVolumeSpecName "kube-api-access-fmt24". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.530694 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e9405376-0114-4bee-b245-f17b30f2594a-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "e9405376-0114-4bee-b245-f17b30f2594a" (UID: "e9405376-0114-4bee-b245-f17b30f2594a"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.625031 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d21abc6-d736-47df-8eac-4dee0691a92c-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.625091 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmt24\" (UniqueName: \"kubernetes.io/projected/e9405376-0114-4bee-b245-f17b30f2594a-kube-api-access-fmt24\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.625122 4706 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e9405376-0114-4bee-b245-f17b30f2594a-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.625135 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tkt56\" (UniqueName: \"kubernetes.io/projected/2d21abc6-d736-47df-8eac-4dee0691a92c-kube-api-access-tkt56\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.625147 4706 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e9405376-0114-4bee-b245-f17b30f2594a-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.670884 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d21abc6-d736-47df-8eac-4dee0691a92c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2d21abc6-d736-47df-8eac-4dee0691a92c" (UID: "2d21abc6-d736-47df-8eac-4dee0691a92c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:27:55 crc kubenswrapper[4706]: I1206 05:27:55.726980 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d21abc6-d736-47df-8eac-4dee0691a92c-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.029636 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" event={"ID":"e9405376-0114-4bee-b245-f17b30f2594a","Type":"ContainerDied","Data":"ca2b3785e8020a03fbd09b8cf88c5110a7e7b23110ab5ef283eaa7594ff939a2"} Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.029696 4706 scope.go:117] "RemoveContainer" containerID="ba70467ae38738266184ad8e81351f9df2124d6019a72d305a83293933d793c6" Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.029709 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-xptzp" Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.033745 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-98kq2" event={"ID":"2985a55d-3af2-4dd6-adde-7714459e08c3","Type":"ContainerDied","Data":"50065bd8f40f80eb560942fc41aa0793235313ff93ad9c241e8d8f6c2c6cad2f"} Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.033888 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-98kq2" Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.041369 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-j4frb" Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.044209 4706 scope.go:117] "RemoveContainer" containerID="92d79e0671426b72ca23e1771655166757ed0b3fd2ef3489f8b34cc1c211998c" Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.044618 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wx94f" Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.046965 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" path="/var/lib/kubelet/pods/5e6f7aa9-bbf3-4160-9eb8-e7d54c354202/volumes" Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.048079 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-9t9kd" Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.048106 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-9t9kd" event={"ID":"03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d","Type":"ContainerStarted","Data":"9b8ecc1913e9a518f9723d682427a1fb67b2553955e9aea10929bd26161b0456"} Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.048124 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j4frb" event={"ID":"2d21abc6-d736-47df-8eac-4dee0691a92c","Type":"ContainerDied","Data":"ceb960ccff7e4638e02d6ce722ec332bae7b6006917dec1a7b92ae581d4d2497"} Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.048142 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wx94f" event={"ID":"404f2b83-1030-4b10-b1cf-c7db67aae01f","Type":"ContainerDied","Data":"0ef2b4d64cbdd444c63d782b02b7af1aa85fc9fbc22031a69432b324ee37d2ca"} Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.051036 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-9t9kd" Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.065140 4706 scope.go:117] "RemoveContainer" containerID="b49e3047ac6356c1753cb6d45b6887afb73cb85ac18544ae0f0657b8caa04cbd" Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.084198 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-9t9kd" podStartSLOduration=2.0841815 podStartE2EDuration="2.0841815s" podCreationTimestamp="2025-12-06 05:27:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:27:56.059447487 +0000 UTC m=+498.387271451" watchObservedRunningTime="2025-12-06 05:27:56.0841815 +0000 UTC m=+498.412005444" Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.086447 4706 scope.go:117] "RemoveContainer" containerID="2437e439b6f7daf1b036314ee54767cb3812b9bdce40f622d96a382a98f8e597" Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.104582 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-98kq2"] Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.108373 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-98kq2"] Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.122235 4706 scope.go:117] "RemoveContainer" containerID="e2ad0d4599c20f803d7d4b64bccc309fbc3cbb9b90088169460a2106e2d0a5dd" Dec 06 
05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.124732 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-xptzp"] Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.130158 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-xptzp"] Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.138065 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j4frb"] Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.151776 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-j4frb"] Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.151836 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wx94f"] Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.154945 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-wx94f"] Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.163688 4706 scope.go:117] "RemoveContainer" containerID="ede8213789bca7ed5e6b8ac6b60825693d5673d776f627ce68e07b14b0fde798" Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.176314 4706 scope.go:117] "RemoveContainer" containerID="88b67b10ab6d97c84ebeab02b174ded59ef788e294e6260611fed0c922a202be" Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.198304 4706 scope.go:117] "RemoveContainer" containerID="317775150df92de25f45a4135dbf96e2c76ac1d007b9491475c05d59a00cd055" Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.220901 4706 scope.go:117] "RemoveContainer" containerID="b077826243b8e814c0f7086499e2748c7ab02c6fbd44d144f33c161fc220bb3c" Dec 06 05:27:56 crc kubenswrapper[4706]: I1206 05:27:56.238917 4706 scope.go:117] "RemoveContainer" containerID="9832f7a7badc382a0d8db8ade0a6b05781c85b398accc1be9de35d344646a188" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.415309 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-6qchr"] Dec 06 05:27:57 crc kubenswrapper[4706]: E1206 05:27:57.415884 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" containerName="extract-utilities" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.415897 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" containerName="extract-utilities" Dec 06 05:27:57 crc kubenswrapper[4706]: E1206 05:27:57.415908 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" containerName="registry-server" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.415916 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" containerName="registry-server" Dec 06 05:27:57 crc kubenswrapper[4706]: E1206 05:27:57.415928 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" containerName="registry-server" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.415936 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" containerName="registry-server" Dec 06 05:27:57 crc kubenswrapper[4706]: E1206 05:27:57.415947 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9405376-0114-4bee-b245-f17b30f2594a" containerName="marketplace-operator" Dec 06 05:27:57 crc kubenswrapper[4706]: 
I1206 05:27:57.415954 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9405376-0114-4bee-b245-f17b30f2594a" containerName="marketplace-operator" Dec 06 05:27:57 crc kubenswrapper[4706]: E1206 05:27:57.415965 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" containerName="extract-content" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.415971 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" containerName="extract-content" Dec 06 05:27:57 crc kubenswrapper[4706]: E1206 05:27:57.415977 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" containerName="extract-content" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.415983 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" containerName="extract-content" Dec 06 05:27:57 crc kubenswrapper[4706]: E1206 05:27:57.415994 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" containerName="extract-content" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.415999 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" containerName="extract-content" Dec 06 05:27:57 crc kubenswrapper[4706]: E1206 05:27:57.416009 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" containerName="extract-content" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.416016 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" containerName="extract-content" Dec 06 05:27:57 crc kubenswrapper[4706]: E1206 05:27:57.416027 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" containerName="extract-utilities" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.416032 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" containerName="extract-utilities" Dec 06 05:27:57 crc kubenswrapper[4706]: E1206 05:27:57.416039 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" containerName="registry-server" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.416059 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" containerName="registry-server" Dec 06 05:27:57 crc kubenswrapper[4706]: E1206 05:27:57.416065 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" containerName="registry-server" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.416070 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" containerName="registry-server" Dec 06 05:27:57 crc kubenswrapper[4706]: E1206 05:27:57.416078 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" containerName="extract-utilities" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.416083 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" containerName="extract-utilities" Dec 06 05:27:57 crc kubenswrapper[4706]: E1206 05:27:57.416094 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" containerName="extract-utilities" Dec 06 
05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.416100 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" containerName="extract-utilities" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.416197 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9405376-0114-4bee-b245-f17b30f2594a" containerName="marketplace-operator" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.416208 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" containerName="registry-server" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.416218 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" containerName="registry-server" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.416226 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e6f7aa9-bbf3-4160-9eb8-e7d54c354202" containerName="registry-server" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.416235 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" containerName="registry-server" Dec 06 05:27:57 crc kubenswrapper[4706]: E1206 05:27:57.416315 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e9405376-0114-4bee-b245-f17b30f2594a" containerName="marketplace-operator" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.416322 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="e9405376-0114-4bee-b245-f17b30f2594a" containerName="marketplace-operator" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.416394 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="e9405376-0114-4bee-b245-f17b30f2594a" containerName="marketplace-operator" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.416966 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6qchr" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.428029 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6qchr"] Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.429025 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.552523 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa0b57c0-e802-4273-99c5-43e1c8fd1887-catalog-content\") pod \"redhat-marketplace-6qchr\" (UID: \"fa0b57c0-e802-4273-99c5-43e1c8fd1887\") " pod="openshift-marketplace/redhat-marketplace-6qchr" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.552605 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa0b57c0-e802-4273-99c5-43e1c8fd1887-utilities\") pod \"redhat-marketplace-6qchr\" (UID: \"fa0b57c0-e802-4273-99c5-43e1c8fd1887\") " pod="openshift-marketplace/redhat-marketplace-6qchr" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.552634 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwjsw\" (UniqueName: \"kubernetes.io/projected/fa0b57c0-e802-4273-99c5-43e1c8fd1887-kube-api-access-rwjsw\") pod \"redhat-marketplace-6qchr\" (UID: \"fa0b57c0-e802-4273-99c5-43e1c8fd1887\") " pod="openshift-marketplace/redhat-marketplace-6qchr" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.615101 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jmcdp"] Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.616733 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jmcdp" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.619108 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.627189 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jmcdp"] Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.653476 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa0b57c0-e802-4273-99c5-43e1c8fd1887-utilities\") pod \"redhat-marketplace-6qchr\" (UID: \"fa0b57c0-e802-4273-99c5-43e1c8fd1887\") " pod="openshift-marketplace/redhat-marketplace-6qchr" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.653530 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwjsw\" (UniqueName: \"kubernetes.io/projected/fa0b57c0-e802-4273-99c5-43e1c8fd1887-kube-api-access-rwjsw\") pod \"redhat-marketplace-6qchr\" (UID: \"fa0b57c0-e802-4273-99c5-43e1c8fd1887\") " pod="openshift-marketplace/redhat-marketplace-6qchr" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.653570 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50601575-3e02-451a-97c3-24b24683e5b8-utilities\") pod \"redhat-operators-jmcdp\" (UID: \"50601575-3e02-451a-97c3-24b24683e5b8\") " pod="openshift-marketplace/redhat-operators-jmcdp" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.653605 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjvbk\" (UniqueName: \"kubernetes.io/projected/50601575-3e02-451a-97c3-24b24683e5b8-kube-api-access-rjvbk\") pod \"redhat-operators-jmcdp\" (UID: \"50601575-3e02-451a-97c3-24b24683e5b8\") " pod="openshift-marketplace/redhat-operators-jmcdp" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.653652 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa0b57c0-e802-4273-99c5-43e1c8fd1887-catalog-content\") pod \"redhat-marketplace-6qchr\" (UID: \"fa0b57c0-e802-4273-99c5-43e1c8fd1887\") " pod="openshift-marketplace/redhat-marketplace-6qchr" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.653722 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50601575-3e02-451a-97c3-24b24683e5b8-catalog-content\") pod \"redhat-operators-jmcdp\" (UID: \"50601575-3e02-451a-97c3-24b24683e5b8\") " pod="openshift-marketplace/redhat-operators-jmcdp" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.653978 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa0b57c0-e802-4273-99c5-43e1c8fd1887-utilities\") pod \"redhat-marketplace-6qchr\" (UID: \"fa0b57c0-e802-4273-99c5-43e1c8fd1887\") " pod="openshift-marketplace/redhat-marketplace-6qchr" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.654133 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa0b57c0-e802-4273-99c5-43e1c8fd1887-catalog-content\") pod \"redhat-marketplace-6qchr\" (UID: 
\"fa0b57c0-e802-4273-99c5-43e1c8fd1887\") " pod="openshift-marketplace/redhat-marketplace-6qchr" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.673042 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwjsw\" (UniqueName: \"kubernetes.io/projected/fa0b57c0-e802-4273-99c5-43e1c8fd1887-kube-api-access-rwjsw\") pod \"redhat-marketplace-6qchr\" (UID: \"fa0b57c0-e802-4273-99c5-43e1c8fd1887\") " pod="openshift-marketplace/redhat-marketplace-6qchr" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.740194 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-6qchr" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.755031 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50601575-3e02-451a-97c3-24b24683e5b8-catalog-content\") pod \"redhat-operators-jmcdp\" (UID: \"50601575-3e02-451a-97c3-24b24683e5b8\") " pod="openshift-marketplace/redhat-operators-jmcdp" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.755109 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50601575-3e02-451a-97c3-24b24683e5b8-utilities\") pod \"redhat-operators-jmcdp\" (UID: \"50601575-3e02-451a-97c3-24b24683e5b8\") " pod="openshift-marketplace/redhat-operators-jmcdp" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.755148 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjvbk\" (UniqueName: \"kubernetes.io/projected/50601575-3e02-451a-97c3-24b24683e5b8-kube-api-access-rjvbk\") pod \"redhat-operators-jmcdp\" (UID: \"50601575-3e02-451a-97c3-24b24683e5b8\") " pod="openshift-marketplace/redhat-operators-jmcdp" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.755588 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50601575-3e02-451a-97c3-24b24683e5b8-utilities\") pod \"redhat-operators-jmcdp\" (UID: \"50601575-3e02-451a-97c3-24b24683e5b8\") " pod="openshift-marketplace/redhat-operators-jmcdp" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.755676 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50601575-3e02-451a-97c3-24b24683e5b8-catalog-content\") pod \"redhat-operators-jmcdp\" (UID: \"50601575-3e02-451a-97c3-24b24683e5b8\") " pod="openshift-marketplace/redhat-operators-jmcdp" Dec 06 05:27:57 crc kubenswrapper[4706]: I1206 05:27:57.777722 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjvbk\" (UniqueName: \"kubernetes.io/projected/50601575-3e02-451a-97c3-24b24683e5b8-kube-api-access-rjvbk\") pod \"redhat-operators-jmcdp\" (UID: \"50601575-3e02-451a-97c3-24b24683e5b8\") " pod="openshift-marketplace/redhat-operators-jmcdp" Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:57.939772 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jmcdp" Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:58.048184 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2985a55d-3af2-4dd6-adde-7714459e08c3" path="/var/lib/kubelet/pods/2985a55d-3af2-4dd6-adde-7714459e08c3/volumes" Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:58.049195 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d21abc6-d736-47df-8eac-4dee0691a92c" path="/var/lib/kubelet/pods/2d21abc6-d736-47df-8eac-4dee0691a92c/volumes" Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:58.049816 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="404f2b83-1030-4b10-b1cf-c7db67aae01f" path="/var/lib/kubelet/pods/404f2b83-1030-4b10-b1cf-c7db67aae01f/volumes" Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:58.051309 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e9405376-0114-4bee-b245-f17b30f2594a" path="/var/lib/kubelet/pods/e9405376-0114-4bee-b245-f17b30f2594a/volumes" Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:58.169468 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-6qchr"] Dec 06 05:27:59 crc kubenswrapper[4706]: W1206 05:27:58.174951 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfa0b57c0_e802_4273_99c5_43e1c8fd1887.slice/crio-fa71da054417f440f19e79be0345967a6c11feb86189012461e7b4ffd0a33955 WatchSource:0}: Error finding container fa71da054417f440f19e79be0345967a6c11feb86189012461e7b4ffd0a33955: Status 404 returned error can't find the container with id fa71da054417f440f19e79be0345967a6c11feb86189012461e7b4ffd0a33955 Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:59.070362 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qchr" event={"ID":"fa0b57c0-e802-4273-99c5-43e1c8fd1887","Type":"ContainerStarted","Data":"fa71da054417f440f19e79be0345967a6c11feb86189012461e7b4ffd0a33955"} Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:59.599991 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jmcdp"] Dec 06 05:27:59 crc kubenswrapper[4706]: W1206 05:27:59.604037 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod50601575_3e02_451a_97c3_24b24683e5b8.slice/crio-fef64cc104705adf6ad7dc14c60a4af2a3fc6cea00317faba8faf668c2ab77e5 WatchSource:0}: Error finding container fef64cc104705adf6ad7dc14c60a4af2a3fc6cea00317faba8faf668c2ab77e5: Status 404 returned error can't find the container with id fef64cc104705adf6ad7dc14c60a4af2a3fc6cea00317faba8faf668c2ab77e5 Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:59.816518 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xn9dq"] Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:59.818405 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xn9dq" Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:59.820564 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:59.828071 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xn9dq"] Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:59.885473 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msw6b\" (UniqueName: \"kubernetes.io/projected/9d06d7a2-470f-433c-870b-c78293eeb02b-kube-api-access-msw6b\") pod \"community-operators-xn9dq\" (UID: \"9d06d7a2-470f-433c-870b-c78293eeb02b\") " pod="openshift-marketplace/community-operators-xn9dq" Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:59.885537 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d06d7a2-470f-433c-870b-c78293eeb02b-catalog-content\") pod \"community-operators-xn9dq\" (UID: \"9d06d7a2-470f-433c-870b-c78293eeb02b\") " pod="openshift-marketplace/community-operators-xn9dq" Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:59.885608 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d06d7a2-470f-433c-870b-c78293eeb02b-utilities\") pod \"community-operators-xn9dq\" (UID: \"9d06d7a2-470f-433c-870b-c78293eeb02b\") " pod="openshift-marketplace/community-operators-xn9dq" Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:59.987333 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msw6b\" (UniqueName: \"kubernetes.io/projected/9d06d7a2-470f-433c-870b-c78293eeb02b-kube-api-access-msw6b\") pod \"community-operators-xn9dq\" (UID: \"9d06d7a2-470f-433c-870b-c78293eeb02b\") " pod="openshift-marketplace/community-operators-xn9dq" Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:59.987746 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d06d7a2-470f-433c-870b-c78293eeb02b-catalog-content\") pod \"community-operators-xn9dq\" (UID: \"9d06d7a2-470f-433c-870b-c78293eeb02b\") " pod="openshift-marketplace/community-operators-xn9dq" Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:59.988281 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d06d7a2-470f-433c-870b-c78293eeb02b-catalog-content\") pod \"community-operators-xn9dq\" (UID: \"9d06d7a2-470f-433c-870b-c78293eeb02b\") " pod="openshift-marketplace/community-operators-xn9dq" Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:59.988401 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d06d7a2-470f-433c-870b-c78293eeb02b-utilities\") pod \"community-operators-xn9dq\" (UID: \"9d06d7a2-470f-433c-870b-c78293eeb02b\") " pod="openshift-marketplace/community-operators-xn9dq" Dec 06 05:27:59 crc kubenswrapper[4706]: I1206 05:27:59.988750 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d06d7a2-470f-433c-870b-c78293eeb02b-utilities\") pod \"community-operators-xn9dq\" (UID: 
\"9d06d7a2-470f-433c-870b-c78293eeb02b\") " pod="openshift-marketplace/community-operators-xn9dq" Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.011784 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msw6b\" (UniqueName: \"kubernetes.io/projected/9d06d7a2-470f-433c-870b-c78293eeb02b-kube-api-access-msw6b\") pod \"community-operators-xn9dq\" (UID: \"9d06d7a2-470f-433c-870b-c78293eeb02b\") " pod="openshift-marketplace/community-operators-xn9dq" Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.015008 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lvmnq"] Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.016001 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lvmnq" Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.017920 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.025941 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lvmnq"] Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.081265 4706 generic.go:334] "Generic (PLEG): container finished" podID="50601575-3e02-451a-97c3-24b24683e5b8" containerID="8bac4c765cfd31aefbd51ac8f03b124b7129e82e30142d90b36df6c7ea847f83" exitCode=0 Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.081325 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jmcdp" event={"ID":"50601575-3e02-451a-97c3-24b24683e5b8","Type":"ContainerDied","Data":"8bac4c765cfd31aefbd51ac8f03b124b7129e82e30142d90b36df6c7ea847f83"} Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.081377 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jmcdp" event={"ID":"50601575-3e02-451a-97c3-24b24683e5b8","Type":"ContainerStarted","Data":"fef64cc104705adf6ad7dc14c60a4af2a3fc6cea00317faba8faf668c2ab77e5"} Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.084325 4706 generic.go:334] "Generic (PLEG): container finished" podID="fa0b57c0-e802-4273-99c5-43e1c8fd1887" containerID="3a4a1f7a210a009f0d4e9ccc80a95974d785235810eb5e0361ded288d3778c91" exitCode=0 Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.084374 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qchr" event={"ID":"fa0b57c0-e802-4273-99c5-43e1c8fd1887","Type":"ContainerDied","Data":"3a4a1f7a210a009f0d4e9ccc80a95974d785235810eb5e0361ded288d3778c91"} Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.085606 4706 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.090773 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4c7cd15-784b-4201-b0e2-f463f15e9bf6-utilities\") pod \"certified-operators-lvmnq\" (UID: \"a4c7cd15-784b-4201-b0e2-f463f15e9bf6\") " pod="openshift-marketplace/certified-operators-lvmnq" Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.090876 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qrlt\" (UniqueName: 
\"kubernetes.io/projected/a4c7cd15-784b-4201-b0e2-f463f15e9bf6-kube-api-access-5qrlt\") pod \"certified-operators-lvmnq\" (UID: \"a4c7cd15-784b-4201-b0e2-f463f15e9bf6\") " pod="openshift-marketplace/certified-operators-lvmnq" Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.090938 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4c7cd15-784b-4201-b0e2-f463f15e9bf6-catalog-content\") pod \"certified-operators-lvmnq\" (UID: \"a4c7cd15-784b-4201-b0e2-f463f15e9bf6\") " pod="openshift-marketplace/certified-operators-lvmnq" Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.152978 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xn9dq" Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.192656 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4c7cd15-784b-4201-b0e2-f463f15e9bf6-utilities\") pod \"certified-operators-lvmnq\" (UID: \"a4c7cd15-784b-4201-b0e2-f463f15e9bf6\") " pod="openshift-marketplace/certified-operators-lvmnq" Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.192749 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qrlt\" (UniqueName: \"kubernetes.io/projected/a4c7cd15-784b-4201-b0e2-f463f15e9bf6-kube-api-access-5qrlt\") pod \"certified-operators-lvmnq\" (UID: \"a4c7cd15-784b-4201-b0e2-f463f15e9bf6\") " pod="openshift-marketplace/certified-operators-lvmnq" Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.192811 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4c7cd15-784b-4201-b0e2-f463f15e9bf6-catalog-content\") pod \"certified-operators-lvmnq\" (UID: \"a4c7cd15-784b-4201-b0e2-f463f15e9bf6\") " pod="openshift-marketplace/certified-operators-lvmnq" Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.193163 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4c7cd15-784b-4201-b0e2-f463f15e9bf6-utilities\") pod \"certified-operators-lvmnq\" (UID: \"a4c7cd15-784b-4201-b0e2-f463f15e9bf6\") " pod="openshift-marketplace/certified-operators-lvmnq" Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.193571 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4c7cd15-784b-4201-b0e2-f463f15e9bf6-catalog-content\") pod \"certified-operators-lvmnq\" (UID: \"a4c7cd15-784b-4201-b0e2-f463f15e9bf6\") " pod="openshift-marketplace/certified-operators-lvmnq" Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.211038 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qrlt\" (UniqueName: \"kubernetes.io/projected/a4c7cd15-784b-4201-b0e2-f463f15e9bf6-kube-api-access-5qrlt\") pod \"certified-operators-lvmnq\" (UID: \"a4c7cd15-784b-4201-b0e2-f463f15e9bf6\") " pod="openshift-marketplace/certified-operators-lvmnq" Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.353968 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lvmnq" Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.551799 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xn9dq"] Dec 06 05:28:00 crc kubenswrapper[4706]: I1206 05:28:00.826990 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lvmnq"] Dec 06 05:28:00 crc kubenswrapper[4706]: W1206 05:28:00.852925 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda4c7cd15_784b_4201_b0e2_f463f15e9bf6.slice/crio-e93b1648edd5de31c79f423a0e94fcb8c5cfc3628d484ba072ff45d73cbb2860 WatchSource:0}: Error finding container e93b1648edd5de31c79f423a0e94fcb8c5cfc3628d484ba072ff45d73cbb2860: Status 404 returned error can't find the container with id e93b1648edd5de31c79f423a0e94fcb8c5cfc3628d484ba072ff45d73cbb2860 Dec 06 05:28:01 crc kubenswrapper[4706]: I1206 05:28:01.091725 4706 generic.go:334] "Generic (PLEG): container finished" podID="9d06d7a2-470f-433c-870b-c78293eeb02b" containerID="a5bd9ea1d1199644dbbba32baf6345fb765d6ddb866a19a6f22bb3329b062e0f" exitCode=0 Dec 06 05:28:01 crc kubenswrapper[4706]: I1206 05:28:01.091809 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xn9dq" event={"ID":"9d06d7a2-470f-433c-870b-c78293eeb02b","Type":"ContainerDied","Data":"a5bd9ea1d1199644dbbba32baf6345fb765d6ddb866a19a6f22bb3329b062e0f"} Dec 06 05:28:01 crc kubenswrapper[4706]: I1206 05:28:01.092263 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xn9dq" event={"ID":"9d06d7a2-470f-433c-870b-c78293eeb02b","Type":"ContainerStarted","Data":"a94d8b135a310d805ad745e17345e5f787bcc2aea12471d1cf986abba4316a58"} Dec 06 05:28:01 crc kubenswrapper[4706]: I1206 05:28:01.093232 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lvmnq" event={"ID":"a4c7cd15-784b-4201-b0e2-f463f15e9bf6","Type":"ContainerStarted","Data":"e93b1648edd5de31c79f423a0e94fcb8c5cfc3628d484ba072ff45d73cbb2860"} Dec 06 05:28:02 crc kubenswrapper[4706]: I1206 05:28:02.122266 4706 generic.go:334] "Generic (PLEG): container finished" podID="a4c7cd15-784b-4201-b0e2-f463f15e9bf6" containerID="1be3f1b521f376698ccefa9e8252123004e0b6e487587c3f076204e4b18f482b" exitCode=0 Dec 06 05:28:02 crc kubenswrapper[4706]: I1206 05:28:02.122333 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lvmnq" event={"ID":"a4c7cd15-784b-4201-b0e2-f463f15e9bf6","Type":"ContainerDied","Data":"1be3f1b521f376698ccefa9e8252123004e0b6e487587c3f076204e4b18f482b"} Dec 06 05:28:02 crc kubenswrapper[4706]: I1206 05:28:02.699169 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-rrslb" Dec 06 05:28:02 crc kubenswrapper[4706]: I1206 05:28:02.759528 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-njll8"] Dec 06 05:28:06 crc kubenswrapper[4706]: I1206 05:28:06.145607 4706 generic.go:334] "Generic (PLEG): container finished" podID="50601575-3e02-451a-97c3-24b24683e5b8" containerID="2d176c54a10f3dde97127f913a8993441398f9125f1c4eace5317c7dfcc57717" exitCode=0 Dec 06 05:28:06 crc kubenswrapper[4706]: I1206 05:28:06.145693 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-jmcdp" event={"ID":"50601575-3e02-451a-97c3-24b24683e5b8","Type":"ContainerDied","Data":"2d176c54a10f3dde97127f913a8993441398f9125f1c4eace5317c7dfcc57717"} Dec 06 05:28:06 crc kubenswrapper[4706]: I1206 05:28:06.150135 4706 generic.go:334] "Generic (PLEG): container finished" podID="fa0b57c0-e802-4273-99c5-43e1c8fd1887" containerID="6ffc9aed1a16e62308f4ad49c64faa3c2dfa701afa3dd60c6e246f3b078e5eaa" exitCode=0 Dec 06 05:28:06 crc kubenswrapper[4706]: I1206 05:28:06.150165 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qchr" event={"ID":"fa0b57c0-e802-4273-99c5-43e1c8fd1887","Type":"ContainerDied","Data":"6ffc9aed1a16e62308f4ad49c64faa3c2dfa701afa3dd60c6e246f3b078e5eaa"} Dec 06 05:28:08 crc kubenswrapper[4706]: I1206 05:28:08.168547 4706 generic.go:334] "Generic (PLEG): container finished" podID="a4c7cd15-784b-4201-b0e2-f463f15e9bf6" containerID="2cad9c119d968b0dd4bb8aa3345c7513cb348455982eb8bbfd6d287acec132c4" exitCode=0 Dec 06 05:28:08 crc kubenswrapper[4706]: I1206 05:28:08.168634 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lvmnq" event={"ID":"a4c7cd15-784b-4201-b0e2-f463f15e9bf6","Type":"ContainerDied","Data":"2cad9c119d968b0dd4bb8aa3345c7513cb348455982eb8bbfd6d287acec132c4"} Dec 06 05:28:14 crc kubenswrapper[4706]: I1206 05:28:14.207296 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xn9dq" event={"ID":"9d06d7a2-470f-433c-870b-c78293eeb02b","Type":"ContainerStarted","Data":"e56779f538657cb0da003ac85956bff6d900551b32e6483bb8d3f6dcbb2d2ee3"} Dec 06 05:28:15 crc kubenswrapper[4706]: I1206 05:28:15.215430 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jmcdp" event={"ID":"50601575-3e02-451a-97c3-24b24683e5b8","Type":"ContainerStarted","Data":"38bd9ae4cb25b0c96a862bbd6868e20a08984b46b724307ce42e01564bab09d8"} Dec 06 05:28:15 crc kubenswrapper[4706]: I1206 05:28:15.217523 4706 generic.go:334] "Generic (PLEG): container finished" podID="9d06d7a2-470f-433c-870b-c78293eeb02b" containerID="e56779f538657cb0da003ac85956bff6d900551b32e6483bb8d3f6dcbb2d2ee3" exitCode=0 Dec 06 05:28:15 crc kubenswrapper[4706]: I1206 05:28:15.217593 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xn9dq" event={"ID":"9d06d7a2-470f-433c-870b-c78293eeb02b","Type":"ContainerDied","Data":"e56779f538657cb0da003ac85956bff6d900551b32e6483bb8d3f6dcbb2d2ee3"} Dec 06 05:28:15 crc kubenswrapper[4706]: I1206 05:28:15.220242 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-6qchr" event={"ID":"fa0b57c0-e802-4273-99c5-43e1c8fd1887","Type":"ContainerStarted","Data":"68abd5e6c269862097ef921f87e09737944138793acc39b276dc750dc332f041"} Dec 06 05:28:18 crc kubenswrapper[4706]: I1206 05:28:18.261188 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jmcdp" podStartSLOduration=10.880834334 podStartE2EDuration="21.261126031s" podCreationTimestamp="2025-12-06 05:27:57 +0000 UTC" firstStartedPulling="2025-12-06 05:28:00.085357591 +0000 UTC m=+502.413181535" lastFinishedPulling="2025-12-06 05:28:10.465649248 +0000 UTC m=+512.793473232" observedRunningTime="2025-12-06 05:28:18.259211849 +0000 UTC m=+520.587035813" watchObservedRunningTime="2025-12-06 05:28:18.261126031 +0000 UTC m=+520.588950035" Dec 06 05:28:19 crc 
kubenswrapper[4706]: E1206 05:28:19.004415 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Unknown desc = initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad: can't talk to a V1 container registry" image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad" Dec 06 05:28:19 crc kubenswrapper[4706]: E1206 05:28:19.004623 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:registry-server,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad,Command:[/bin/opm],Args:[serve /extracted-catalog/catalog --cache-dir=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:grpc,HostPort:0,ContainerPort:50051,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:GOMEMLIMIT,Value:40MiB,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{41943040 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5qrlt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:10,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[grpc_health_probe -addr=:50051],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:0,TimeoutSeconds:5,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:10,TerminationGracePeriodSeconds:nil,},ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-lvmnq_openshift-marketplace(a4c7cd15-784b-4201-b0e2-f463f15e9bf6): ErrImagePull: initializing source docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad: can't talk to a V1 container registry" logger="UnhandledError" Dec 06 05:28:19 crc kubenswrapper[4706]: E1206 05:28:19.007533 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"registry-server\" with ErrImagePull: \"initializing source 
docker://quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad: can't talk to a V1 container registry\"" pod="openshift-marketplace/certified-operators-lvmnq" podUID="a4c7cd15-784b-4201-b0e2-f463f15e9bf6" Dec 06 05:28:20 crc kubenswrapper[4706]: I1206 05:28:20.266928 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-6qchr" podStartSLOduration=13.083296021 podStartE2EDuration="23.26690289s" podCreationTimestamp="2025-12-06 05:27:57 +0000 UTC" firstStartedPulling="2025-12-06 05:28:00.086479131 +0000 UTC m=+502.414303075" lastFinishedPulling="2025-12-06 05:28:10.27008599 +0000 UTC m=+512.597909944" observedRunningTime="2025-12-06 05:28:20.263662183 +0000 UTC m=+522.591486137" watchObservedRunningTime="2025-12-06 05:28:20.26690289 +0000 UTC m=+522.594726834" Dec 06 05:28:25 crc kubenswrapper[4706]: I1206 05:28:25.279187 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xn9dq" event={"ID":"9d06d7a2-470f-433c-870b-c78293eeb02b","Type":"ContainerStarted","Data":"5d0ed54994801eb3f1d781283344c0e621fa0985d244c0ede079797d77175d7f"} Dec 06 05:28:25 crc kubenswrapper[4706]: I1206 05:28:25.299237 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xn9dq" podStartSLOduration=2.486925042 podStartE2EDuration="26.299220172s" podCreationTimestamp="2025-12-06 05:27:59 +0000 UTC" firstStartedPulling="2025-12-06 05:28:01.093272978 +0000 UTC m=+503.421096922" lastFinishedPulling="2025-12-06 05:28:24.905568108 +0000 UTC m=+527.233392052" observedRunningTime="2025-12-06 05:28:25.298883563 +0000 UTC m=+527.626707527" watchObservedRunningTime="2025-12-06 05:28:25.299220172 +0000 UTC m=+527.627044116" Dec 06 05:28:27 crc kubenswrapper[4706]: I1206 05:28:27.740829 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-6qchr" Dec 06 05:28:27 crc kubenswrapper[4706]: I1206 05:28:27.740885 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-6qchr" Dec 06 05:28:27 crc kubenswrapper[4706]: I1206 05:28:27.782719 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-6qchr" Dec 06 05:28:27 crc kubenswrapper[4706]: I1206 05:28:27.802148 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-njll8" podUID="cbcef7ec-a2f0-4363-93e6-772d6d35d571" containerName="registry" containerID="cri-o://0814d20245d5d4b36fdc6b8cc07241a34c47a4cd54da2bdc70e8d058dd9d5ea0" gracePeriod=30 Dec 06 05:28:27 crc kubenswrapper[4706]: I1206 05:28:27.940443 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jmcdp" Dec 06 05:28:27 crc kubenswrapper[4706]: I1206 05:28:27.940818 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jmcdp" Dec 06 05:28:27 crc kubenswrapper[4706]: I1206 05:28:27.986417 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jmcdp" Dec 06 05:28:28 crc kubenswrapper[4706]: I1206 05:28:28.250689 4706 patch_prober.go:28] interesting pod/image-registry-697d97f7c8-njll8 container/registry namespace/openshift-image-registry: 
Readiness probe status=failure output="Get \"https://10.217.0.15:5000/healthz\": dial tcp 10.217.0.15:5000: connect: connection refused" start-of-body= Dec 06 05:28:28 crc kubenswrapper[4706]: I1206 05:28:28.250778 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-image-registry/image-registry-697d97f7c8-njll8" podUID="cbcef7ec-a2f0-4363-93e6-772d6d35d571" containerName="registry" probeResult="failure" output="Get \"https://10.217.0.15:5000/healthz\": dial tcp 10.217.0.15:5000: connect: connection refused" Dec 06 05:28:28 crc kubenswrapper[4706]: I1206 05:28:28.298216 4706 generic.go:334] "Generic (PLEG): container finished" podID="cbcef7ec-a2f0-4363-93e6-772d6d35d571" containerID="0814d20245d5d4b36fdc6b8cc07241a34c47a4cd54da2bdc70e8d058dd9d5ea0" exitCode=0 Dec 06 05:28:28 crc kubenswrapper[4706]: I1206 05:28:28.298279 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-njll8" event={"ID":"cbcef7ec-a2f0-4363-93e6-772d6d35d571","Type":"ContainerDied","Data":"0814d20245d5d4b36fdc6b8cc07241a34c47a4cd54da2bdc70e8d058dd9d5ea0"} Dec 06 05:28:28 crc kubenswrapper[4706]: I1206 05:28:28.346831 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jmcdp" Dec 06 05:28:28 crc kubenswrapper[4706]: I1206 05:28:28.350750 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-6qchr" Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.312121 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-njll8" event={"ID":"cbcef7ec-a2f0-4363-93e6-772d6d35d571","Type":"ContainerDied","Data":"e7daac2ac8be4ddc494adf6491ed17519d2d27a6062df2ded8fce5295e589ac5"} Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.312195 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e7daac2ac8be4ddc494adf6491ed17519d2d27a6062df2ded8fce5295e589ac5" Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.344082 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.457457 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cbcef7ec-a2f0-4363-93e6-772d6d35d571-bound-sa-token\") pod \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.458401 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9jmlj\" (UniqueName: \"kubernetes.io/projected/cbcef7ec-a2f0-4363-93e6-772d6d35d571-kube-api-access-9jmlj\") pod \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.458557 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cbcef7ec-a2f0-4363-93e6-772d6d35d571-installation-pull-secrets\") pod \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.458625 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cbcef7ec-a2f0-4363-93e6-772d6d35d571-registry-tls\") pod \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.459990 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.460203 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cbcef7ec-a2f0-4363-93e6-772d6d35d571-registry-certificates\") pod \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.460280 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cbcef7ec-a2f0-4363-93e6-772d6d35d571-ca-trust-extracted\") pod \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.460524 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cbcef7ec-a2f0-4363-93e6-772d6d35d571-trusted-ca\") pod \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\" (UID: \"cbcef7ec-a2f0-4363-93e6-772d6d35d571\") " Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.461518 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cbcef7ec-a2f0-4363-93e6-772d6d35d571-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "cbcef7ec-a2f0-4363-93e6-772d6d35d571" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.461846 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cbcef7ec-a2f0-4363-93e6-772d6d35d571-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "cbcef7ec-a2f0-4363-93e6-772d6d35d571" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.465631 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbcef7ec-a2f0-4363-93e6-772d6d35d571-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "cbcef7ec-a2f0-4363-93e6-772d6d35d571" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.465749 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbcef7ec-a2f0-4363-93e6-772d6d35d571-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "cbcef7ec-a2f0-4363-93e6-772d6d35d571" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.466584 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbcef7ec-a2f0-4363-93e6-772d6d35d571-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "cbcef7ec-a2f0-4363-93e6-772d6d35d571" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.467029 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbcef7ec-a2f0-4363-93e6-772d6d35d571-kube-api-access-9jmlj" (OuterVolumeSpecName: "kube-api-access-9jmlj") pod "cbcef7ec-a2f0-4363-93e6-772d6d35d571" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571"). InnerVolumeSpecName "kube-api-access-9jmlj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.475719 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "cbcef7ec-a2f0-4363-93e6-772d6d35d571" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.487272 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cbcef7ec-a2f0-4363-93e6-772d6d35d571-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "cbcef7ec-a2f0-4363-93e6-772d6d35d571" (UID: "cbcef7ec-a2f0-4363-93e6-772d6d35d571"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.563030 4706 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cbcef7ec-a2f0-4363-93e6-772d6d35d571-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.563172 4706 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cbcef7ec-a2f0-4363-93e6-772d6d35d571-registry-tls\") on node \"crc\" DevicePath \"\"" Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.563203 4706 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cbcef7ec-a2f0-4363-93e6-772d6d35d571-registry-certificates\") on node \"crc\" DevicePath \"\"" Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.563230 4706 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cbcef7ec-a2f0-4363-93e6-772d6d35d571-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.563256 4706 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cbcef7ec-a2f0-4363-93e6-772d6d35d571-trusted-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.563282 4706 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cbcef7ec-a2f0-4363-93e6-772d6d35d571-bound-sa-token\") on node \"crc\" DevicePath \"\"" Dec 06 05:28:29 crc kubenswrapper[4706]: I1206 05:28:29.563307 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9jmlj\" (UniqueName: \"kubernetes.io/projected/cbcef7ec-a2f0-4363-93e6-772d6d35d571-kube-api-access-9jmlj\") on node \"crc\" DevicePath \"\"" Dec 06 05:28:30 crc kubenswrapper[4706]: I1206 05:28:30.154238 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xn9dq" Dec 06 05:28:30 crc kubenswrapper[4706]: I1206 05:28:30.154329 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xn9dq" Dec 06 05:28:30 crc kubenswrapper[4706]: I1206 05:28:30.202410 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xn9dq" Dec 06 05:28:30 crc kubenswrapper[4706]: I1206 05:28:30.317272 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-njll8" Dec 06 05:28:30 crc kubenswrapper[4706]: I1206 05:28:30.355227 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-njll8"] Dec 06 05:28:30 crc kubenswrapper[4706]: I1206 05:28:30.360473 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-njll8"] Dec 06 05:28:30 crc kubenswrapper[4706]: I1206 05:28:30.361230 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xn9dq" Dec 06 05:28:32 crc kubenswrapper[4706]: I1206 05:28:32.042398 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cbcef7ec-a2f0-4363-93e6-772d6d35d571" path="/var/lib/kubelet/pods/cbcef7ec-a2f0-4363-93e6-772d6d35d571/volumes" Dec 06 05:28:32 crc kubenswrapper[4706]: I1206 05:28:32.331293 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lvmnq" event={"ID":"a4c7cd15-784b-4201-b0e2-f463f15e9bf6","Type":"ContainerStarted","Data":"51a61b18f67dfea0df52b6e4e047efefb6426fc7fb1f43cfa738027b060be0e6"} Dec 06 05:28:32 crc kubenswrapper[4706]: I1206 05:28:32.353076 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lvmnq" podStartSLOduration=4.064529483 podStartE2EDuration="32.353057881s" podCreationTimestamp="2025-12-06 05:28:00 +0000 UTC" firstStartedPulling="2025-12-06 05:28:03.135264843 +0000 UTC m=+505.463088797" lastFinishedPulling="2025-12-06 05:28:31.423793251 +0000 UTC m=+533.751617195" observedRunningTime="2025-12-06 05:28:32.3500722 +0000 UTC m=+534.677896154" watchObservedRunningTime="2025-12-06 05:28:32.353057881 +0000 UTC m=+534.680881825" Dec 06 05:28:40 crc kubenswrapper[4706]: I1206 05:28:40.354353 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lvmnq" Dec 06 05:28:40 crc kubenswrapper[4706]: I1206 05:28:40.354902 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lvmnq" Dec 06 05:28:40 crc kubenswrapper[4706]: I1206 05:28:40.387419 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lvmnq" Dec 06 05:28:40 crc kubenswrapper[4706]: I1206 05:28:40.421656 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lvmnq" Dec 06 05:29:35 crc kubenswrapper[4706]: I1206 05:29:35.961985 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:29:35 crc kubenswrapper[4706]: I1206 05:29:35.963161 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:29:38 crc kubenswrapper[4706]: I1206 05:29:38.833334 4706 scope.go:117] "RemoveContainer" containerID="0814d20245d5d4b36fdc6b8cc07241a34c47a4cd54da2bdc70e8d058dd9d5ea0" Dec 06 05:30:00 crc 
kubenswrapper[4706]: I1206 05:30:00.191829 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj"] Dec 06 05:30:00 crc kubenswrapper[4706]: E1206 05:30:00.192689 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbcef7ec-a2f0-4363-93e6-772d6d35d571" containerName="registry" Dec 06 05:30:00 crc kubenswrapper[4706]: I1206 05:30:00.192703 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbcef7ec-a2f0-4363-93e6-772d6d35d571" containerName="registry" Dec 06 05:30:00 crc kubenswrapper[4706]: I1206 05:30:00.192815 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbcef7ec-a2f0-4363-93e6-772d6d35d571" containerName="registry" Dec 06 05:30:00 crc kubenswrapper[4706]: I1206 05:30:00.193192 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj" Dec 06 05:30:00 crc kubenswrapper[4706]: I1206 05:30:00.197212 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 06 05:30:00 crc kubenswrapper[4706]: I1206 05:30:00.198265 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 06 05:30:00 crc kubenswrapper[4706]: I1206 05:30:00.206595 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj"] Dec 06 05:30:00 crc kubenswrapper[4706]: I1206 05:30:00.345118 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a8e37b91-30b0-44fe-96d5-e01d222993b8-config-volume\") pod \"collect-profiles-29416650-fxvdj\" (UID: \"a8e37b91-30b0-44fe-96d5-e01d222993b8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj" Dec 06 05:30:00 crc kubenswrapper[4706]: I1206 05:30:00.345193 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87ts9\" (UniqueName: \"kubernetes.io/projected/a8e37b91-30b0-44fe-96d5-e01d222993b8-kube-api-access-87ts9\") pod \"collect-profiles-29416650-fxvdj\" (UID: \"a8e37b91-30b0-44fe-96d5-e01d222993b8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj" Dec 06 05:30:00 crc kubenswrapper[4706]: I1206 05:30:00.345616 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a8e37b91-30b0-44fe-96d5-e01d222993b8-secret-volume\") pod \"collect-profiles-29416650-fxvdj\" (UID: \"a8e37b91-30b0-44fe-96d5-e01d222993b8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj" Dec 06 05:30:00 crc kubenswrapper[4706]: I1206 05:30:00.446865 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87ts9\" (UniqueName: \"kubernetes.io/projected/a8e37b91-30b0-44fe-96d5-e01d222993b8-kube-api-access-87ts9\") pod \"collect-profiles-29416650-fxvdj\" (UID: \"a8e37b91-30b0-44fe-96d5-e01d222993b8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj" Dec 06 05:30:00 crc kubenswrapper[4706]: I1206 05:30:00.447033 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/a8e37b91-30b0-44fe-96d5-e01d222993b8-secret-volume\") pod \"collect-profiles-29416650-fxvdj\" (UID: \"a8e37b91-30b0-44fe-96d5-e01d222993b8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj" Dec 06 05:30:00 crc kubenswrapper[4706]: I1206 05:30:00.447103 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a8e37b91-30b0-44fe-96d5-e01d222993b8-config-volume\") pod \"collect-profiles-29416650-fxvdj\" (UID: \"a8e37b91-30b0-44fe-96d5-e01d222993b8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj" Dec 06 05:30:00 crc kubenswrapper[4706]: I1206 05:30:00.451317 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a8e37b91-30b0-44fe-96d5-e01d222993b8-config-volume\") pod \"collect-profiles-29416650-fxvdj\" (UID: \"a8e37b91-30b0-44fe-96d5-e01d222993b8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj" Dec 06 05:30:00 crc kubenswrapper[4706]: I1206 05:30:00.459175 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a8e37b91-30b0-44fe-96d5-e01d222993b8-secret-volume\") pod \"collect-profiles-29416650-fxvdj\" (UID: \"a8e37b91-30b0-44fe-96d5-e01d222993b8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj" Dec 06 05:30:00 crc kubenswrapper[4706]: I1206 05:30:00.470176 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87ts9\" (UniqueName: \"kubernetes.io/projected/a8e37b91-30b0-44fe-96d5-e01d222993b8-kube-api-access-87ts9\") pod \"collect-profiles-29416650-fxvdj\" (UID: \"a8e37b91-30b0-44fe-96d5-e01d222993b8\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj" Dec 06 05:30:00 crc kubenswrapper[4706]: I1206 05:30:00.556206 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj" Dec 06 05:30:00 crc kubenswrapper[4706]: I1206 05:30:00.805738 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj"] Dec 06 05:30:00 crc kubenswrapper[4706]: I1206 05:30:00.855998 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj" event={"ID":"a8e37b91-30b0-44fe-96d5-e01d222993b8","Type":"ContainerStarted","Data":"5d25c65433d936de5f95e41b4671ef88c17f9e0a5e4461f8ba8b35bffefcc259"} Dec 06 05:30:02 crc kubenswrapper[4706]: I1206 05:30:02.871218 4706 generic.go:334] "Generic (PLEG): container finished" podID="a8e37b91-30b0-44fe-96d5-e01d222993b8" containerID="75f1758be77ed1bb0894a18feb0b5c390ebc10e1bf6ea21b36c816ae5621007c" exitCode=0 Dec 06 05:30:02 crc kubenswrapper[4706]: I1206 05:30:02.871296 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj" event={"ID":"a8e37b91-30b0-44fe-96d5-e01d222993b8","Type":"ContainerDied","Data":"75f1758be77ed1bb0894a18feb0b5c390ebc10e1bf6ea21b36c816ae5621007c"} Dec 06 05:30:04 crc kubenswrapper[4706]: I1206 05:30:04.218857 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj" Dec 06 05:30:04 crc kubenswrapper[4706]: I1206 05:30:04.398500 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a8e37b91-30b0-44fe-96d5-e01d222993b8-config-volume\") pod \"a8e37b91-30b0-44fe-96d5-e01d222993b8\" (UID: \"a8e37b91-30b0-44fe-96d5-e01d222993b8\") " Dec 06 05:30:04 crc kubenswrapper[4706]: I1206 05:30:04.398601 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-87ts9\" (UniqueName: \"kubernetes.io/projected/a8e37b91-30b0-44fe-96d5-e01d222993b8-kube-api-access-87ts9\") pod \"a8e37b91-30b0-44fe-96d5-e01d222993b8\" (UID: \"a8e37b91-30b0-44fe-96d5-e01d222993b8\") " Dec 06 05:30:04 crc kubenswrapper[4706]: I1206 05:30:04.398629 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a8e37b91-30b0-44fe-96d5-e01d222993b8-secret-volume\") pod \"a8e37b91-30b0-44fe-96d5-e01d222993b8\" (UID: \"a8e37b91-30b0-44fe-96d5-e01d222993b8\") " Dec 06 05:30:04 crc kubenswrapper[4706]: I1206 05:30:04.401346 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8e37b91-30b0-44fe-96d5-e01d222993b8-config-volume" (OuterVolumeSpecName: "config-volume") pod "a8e37b91-30b0-44fe-96d5-e01d222993b8" (UID: "a8e37b91-30b0-44fe-96d5-e01d222993b8"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:30:04 crc kubenswrapper[4706]: I1206 05:30:04.406675 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a8e37b91-30b0-44fe-96d5-e01d222993b8-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a8e37b91-30b0-44fe-96d5-e01d222993b8" (UID: "a8e37b91-30b0-44fe-96d5-e01d222993b8"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:30:04 crc kubenswrapper[4706]: I1206 05:30:04.407096 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8e37b91-30b0-44fe-96d5-e01d222993b8-kube-api-access-87ts9" (OuterVolumeSpecName: "kube-api-access-87ts9") pod "a8e37b91-30b0-44fe-96d5-e01d222993b8" (UID: "a8e37b91-30b0-44fe-96d5-e01d222993b8"). InnerVolumeSpecName "kube-api-access-87ts9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:30:04 crc kubenswrapper[4706]: I1206 05:30:04.499955 4706 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a8e37b91-30b0-44fe-96d5-e01d222993b8-config-volume\") on node \"crc\" DevicePath \"\"" Dec 06 05:30:04 crc kubenswrapper[4706]: I1206 05:30:04.499992 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-87ts9\" (UniqueName: \"kubernetes.io/projected/a8e37b91-30b0-44fe-96d5-e01d222993b8-kube-api-access-87ts9\") on node \"crc\" DevicePath \"\"" Dec 06 05:30:04 crc kubenswrapper[4706]: I1206 05:30:04.500009 4706 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a8e37b91-30b0-44fe-96d5-e01d222993b8-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 06 05:30:04 crc kubenswrapper[4706]: I1206 05:30:04.887317 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj" event={"ID":"a8e37b91-30b0-44fe-96d5-e01d222993b8","Type":"ContainerDied","Data":"5d25c65433d936de5f95e41b4671ef88c17f9e0a5e4461f8ba8b35bffefcc259"} Dec 06 05:30:04 crc kubenswrapper[4706]: I1206 05:30:04.887364 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5d25c65433d936de5f95e41b4671ef88c17f9e0a5e4461f8ba8b35bffefcc259" Dec 06 05:30:04 crc kubenswrapper[4706]: I1206 05:30:04.887387 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj" Dec 06 05:30:05 crc kubenswrapper[4706]: I1206 05:30:05.962300 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:30:05 crc kubenswrapper[4706]: I1206 05:30:05.962734 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:30:35 crc kubenswrapper[4706]: I1206 05:30:35.961567 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:30:35 crc kubenswrapper[4706]: I1206 05:30:35.962283 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:30:35 crc kubenswrapper[4706]: I1206 05:30:35.962346 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:30:35 crc kubenswrapper[4706]: I1206 05:30:35.963087 4706 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"d41cbbb0ceb6ccc8501ce4b75011f83163d456684ff13944b7d6b7c128f476e3"} pod="openshift-machine-config-operator/machine-config-daemon-z27rn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 06 05:30:35 crc kubenswrapper[4706]: I1206 05:30:35.963193 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" containerID="cri-o://d41cbbb0ceb6ccc8501ce4b75011f83163d456684ff13944b7d6b7c128f476e3" gracePeriod=600 Dec 06 05:30:37 crc kubenswrapper[4706]: I1206 05:30:37.083720 4706 generic.go:334] "Generic (PLEG): container finished" podID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerID="d41cbbb0ceb6ccc8501ce4b75011f83163d456684ff13944b7d6b7c128f476e3" exitCode=0 Dec 06 05:30:37 crc kubenswrapper[4706]: I1206 05:30:37.083768 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerDied","Data":"d41cbbb0ceb6ccc8501ce4b75011f83163d456684ff13944b7d6b7c128f476e3"} Dec 06 05:30:37 crc kubenswrapper[4706]: I1206 05:30:37.083801 4706 scope.go:117] "RemoveContainer" containerID="a50b611b00cc5b19681640fa0163c59ec199ee057feb6e3aa5bd246ae8a33948" Dec 06 05:30:39 crc kubenswrapper[4706]: I1206 05:30:39.102681 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"d351fc246d2774fcded6a1058eb8824f36d694019b784e663bb46cc68f90094f"} Dec 06 05:32:52 crc kubenswrapper[4706]: I1206 05:32:52.589558 4706 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Dec 06 05:33:05 crc kubenswrapper[4706]: I1206 05:33:05.961794 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:33:05 crc kubenswrapper[4706]: I1206 05:33:05.962415 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:33:35 crc kubenswrapper[4706]: I1206 05:33:35.961532 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:33:35 crc kubenswrapper[4706]: I1206 05:33:35.963968 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:33:42 crc kubenswrapper[4706]: I1206 05:33:42.584879 4706 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-marketplace/community-operators-4c8sv"] Dec 06 05:33:42 crc kubenswrapper[4706]: E1206 05:33:42.585466 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a8e37b91-30b0-44fe-96d5-e01d222993b8" containerName="collect-profiles" Dec 06 05:33:42 crc kubenswrapper[4706]: I1206 05:33:42.585481 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a8e37b91-30b0-44fe-96d5-e01d222993b8" containerName="collect-profiles" Dec 06 05:33:42 crc kubenswrapper[4706]: I1206 05:33:42.585607 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a8e37b91-30b0-44fe-96d5-e01d222993b8" containerName="collect-profiles" Dec 06 05:33:42 crc kubenswrapper[4706]: I1206 05:33:42.586510 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4c8sv" Dec 06 05:33:42 crc kubenswrapper[4706]: I1206 05:33:42.593139 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4c8sv"] Dec 06 05:33:42 crc kubenswrapper[4706]: I1206 05:33:42.686967 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08a3bae1-4328-437e-a2b2-724888186960-catalog-content\") pod \"community-operators-4c8sv\" (UID: \"08a3bae1-4328-437e-a2b2-724888186960\") " pod="openshift-marketplace/community-operators-4c8sv" Dec 06 05:33:42 crc kubenswrapper[4706]: I1206 05:33:42.687288 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnnpb\" (UniqueName: \"kubernetes.io/projected/08a3bae1-4328-437e-a2b2-724888186960-kube-api-access-rnnpb\") pod \"community-operators-4c8sv\" (UID: \"08a3bae1-4328-437e-a2b2-724888186960\") " pod="openshift-marketplace/community-operators-4c8sv" Dec 06 05:33:42 crc kubenswrapper[4706]: I1206 05:33:42.687317 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08a3bae1-4328-437e-a2b2-724888186960-utilities\") pod \"community-operators-4c8sv\" (UID: \"08a3bae1-4328-437e-a2b2-724888186960\") " pod="openshift-marketplace/community-operators-4c8sv" Dec 06 05:33:42 crc kubenswrapper[4706]: I1206 05:33:42.788364 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08a3bae1-4328-437e-a2b2-724888186960-catalog-content\") pod \"community-operators-4c8sv\" (UID: \"08a3bae1-4328-437e-a2b2-724888186960\") " pod="openshift-marketplace/community-operators-4c8sv" Dec 06 05:33:42 crc kubenswrapper[4706]: I1206 05:33:42.788441 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnnpb\" (UniqueName: \"kubernetes.io/projected/08a3bae1-4328-437e-a2b2-724888186960-kube-api-access-rnnpb\") pod \"community-operators-4c8sv\" (UID: \"08a3bae1-4328-437e-a2b2-724888186960\") " pod="openshift-marketplace/community-operators-4c8sv" Dec 06 05:33:42 crc kubenswrapper[4706]: I1206 05:33:42.788470 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08a3bae1-4328-437e-a2b2-724888186960-utilities\") pod \"community-operators-4c8sv\" (UID: \"08a3bae1-4328-437e-a2b2-724888186960\") " pod="openshift-marketplace/community-operators-4c8sv" Dec 06 05:33:42 crc kubenswrapper[4706]: I1206 05:33:42.789427 4706 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08a3bae1-4328-437e-a2b2-724888186960-catalog-content\") pod \"community-operators-4c8sv\" (UID: \"08a3bae1-4328-437e-a2b2-724888186960\") " pod="openshift-marketplace/community-operators-4c8sv" Dec 06 05:33:42 crc kubenswrapper[4706]: I1206 05:33:42.789474 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08a3bae1-4328-437e-a2b2-724888186960-utilities\") pod \"community-operators-4c8sv\" (UID: \"08a3bae1-4328-437e-a2b2-724888186960\") " pod="openshift-marketplace/community-operators-4c8sv" Dec 06 05:33:42 crc kubenswrapper[4706]: I1206 05:33:42.811351 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnnpb\" (UniqueName: \"kubernetes.io/projected/08a3bae1-4328-437e-a2b2-724888186960-kube-api-access-rnnpb\") pod \"community-operators-4c8sv\" (UID: \"08a3bae1-4328-437e-a2b2-724888186960\") " pod="openshift-marketplace/community-operators-4c8sv" Dec 06 05:33:42 crc kubenswrapper[4706]: I1206 05:33:42.944570 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4c8sv" Dec 06 05:33:43 crc kubenswrapper[4706]: I1206 05:33:43.205646 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4c8sv"] Dec 06 05:33:43 crc kubenswrapper[4706]: I1206 05:33:43.331600 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4c8sv" event={"ID":"08a3bae1-4328-437e-a2b2-724888186960","Type":"ContainerStarted","Data":"f2acf4986778f2efb3bc721fc96c31074bde5247ecd9e7190250ed47a13adcc6"} Dec 06 05:33:44 crc kubenswrapper[4706]: I1206 05:33:44.343493 4706 generic.go:334] "Generic (PLEG): container finished" podID="08a3bae1-4328-437e-a2b2-724888186960" containerID="bc2584e06289f99003a14d36ede2d6acbf4e05dca07475db9db9a094daa3a7c2" exitCode=0 Dec 06 05:33:44 crc kubenswrapper[4706]: I1206 05:33:44.343574 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4c8sv" event={"ID":"08a3bae1-4328-437e-a2b2-724888186960","Type":"ContainerDied","Data":"bc2584e06289f99003a14d36ede2d6acbf4e05dca07475db9db9a094daa3a7c2"} Dec 06 05:33:44 crc kubenswrapper[4706]: I1206 05:33:44.347304 4706 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 06 05:33:46 crc kubenswrapper[4706]: I1206 05:33:46.360300 4706 generic.go:334] "Generic (PLEG): container finished" podID="08a3bae1-4328-437e-a2b2-724888186960" containerID="85d3e85f194da097beff9a27b2aa1a6cba1fc00347c399dd76dedfb7329c9470" exitCode=0 Dec 06 05:33:46 crc kubenswrapper[4706]: I1206 05:33:46.360353 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4c8sv" event={"ID":"08a3bae1-4328-437e-a2b2-724888186960","Type":"ContainerDied","Data":"85d3e85f194da097beff9a27b2aa1a6cba1fc00347c399dd76dedfb7329c9470"} Dec 06 05:33:48 crc kubenswrapper[4706]: I1206 05:33:48.374562 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4c8sv" event={"ID":"08a3bae1-4328-437e-a2b2-724888186960","Type":"ContainerStarted","Data":"1ca3ced2874a0d33ba9f2c5ce711c9af6ccf92a1d3c5a7173b4c8d0740b9c2a4"} Dec 06 05:33:48 crc kubenswrapper[4706]: I1206 05:33:48.395213 4706 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openshift-marketplace/community-operators-4c8sv" podStartSLOduration=3.513368619 podStartE2EDuration="6.395195146s" podCreationTimestamp="2025-12-06 05:33:42 +0000 UTC" firstStartedPulling="2025-12-06 05:33:44.346687139 +0000 UTC m=+846.674511123" lastFinishedPulling="2025-12-06 05:33:47.228513706 +0000 UTC m=+849.556337650" observedRunningTime="2025-12-06 05:33:48.391335262 +0000 UTC m=+850.719159236" watchObservedRunningTime="2025-12-06 05:33:48.395195146 +0000 UTC m=+850.723019090" Dec 06 05:33:52 crc kubenswrapper[4706]: I1206 05:33:52.945573 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4c8sv" Dec 06 05:33:52 crc kubenswrapper[4706]: I1206 05:33:52.946153 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4c8sv" Dec 06 05:33:52 crc kubenswrapper[4706]: I1206 05:33:52.997855 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4c8sv" Dec 06 05:33:53 crc kubenswrapper[4706]: I1206 05:33:53.461991 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4c8sv" Dec 06 05:33:53 crc kubenswrapper[4706]: I1206 05:33:53.505259 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4c8sv"] Dec 06 05:33:55 crc kubenswrapper[4706]: I1206 05:33:55.415189 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4c8sv" podUID="08a3bae1-4328-437e-a2b2-724888186960" containerName="registry-server" containerID="cri-o://1ca3ced2874a0d33ba9f2c5ce711c9af6ccf92a1d3c5a7173b4c8d0740b9c2a4" gracePeriod=2 Dec 06 05:33:57 crc kubenswrapper[4706]: I1206 05:33:57.428010 4706 generic.go:334] "Generic (PLEG): container finished" podID="08a3bae1-4328-437e-a2b2-724888186960" containerID="1ca3ced2874a0d33ba9f2c5ce711c9af6ccf92a1d3c5a7173b4c8d0740b9c2a4" exitCode=0 Dec 06 05:33:57 crc kubenswrapper[4706]: I1206 05:33:57.428110 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4c8sv" event={"ID":"08a3bae1-4328-437e-a2b2-724888186960","Type":"ContainerDied","Data":"1ca3ced2874a0d33ba9f2c5ce711c9af6ccf92a1d3c5a7173b4c8d0740b9c2a4"} Dec 06 05:33:58 crc kubenswrapper[4706]: I1206 05:33:58.660315 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4c8sv" Dec 06 05:33:58 crc kubenswrapper[4706]: I1206 05:33:58.717339 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08a3bae1-4328-437e-a2b2-724888186960-utilities\") pod \"08a3bae1-4328-437e-a2b2-724888186960\" (UID: \"08a3bae1-4328-437e-a2b2-724888186960\") " Dec 06 05:33:58 crc kubenswrapper[4706]: I1206 05:33:58.717408 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnnpb\" (UniqueName: \"kubernetes.io/projected/08a3bae1-4328-437e-a2b2-724888186960-kube-api-access-rnnpb\") pod \"08a3bae1-4328-437e-a2b2-724888186960\" (UID: \"08a3bae1-4328-437e-a2b2-724888186960\") " Dec 06 05:33:58 crc kubenswrapper[4706]: I1206 05:33:58.717450 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08a3bae1-4328-437e-a2b2-724888186960-catalog-content\") pod \"08a3bae1-4328-437e-a2b2-724888186960\" (UID: \"08a3bae1-4328-437e-a2b2-724888186960\") " Dec 06 05:33:58 crc kubenswrapper[4706]: I1206 05:33:58.718511 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08a3bae1-4328-437e-a2b2-724888186960-utilities" (OuterVolumeSpecName: "utilities") pod "08a3bae1-4328-437e-a2b2-724888186960" (UID: "08a3bae1-4328-437e-a2b2-724888186960"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:33:58 crc kubenswrapper[4706]: I1206 05:33:58.723629 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/08a3bae1-4328-437e-a2b2-724888186960-kube-api-access-rnnpb" (OuterVolumeSpecName: "kube-api-access-rnnpb") pod "08a3bae1-4328-437e-a2b2-724888186960" (UID: "08a3bae1-4328-437e-a2b2-724888186960"). InnerVolumeSpecName "kube-api-access-rnnpb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:33:58 crc kubenswrapper[4706]: I1206 05:33:58.729543 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/08a3bae1-4328-437e-a2b2-724888186960-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 05:33:58 crc kubenswrapper[4706]: I1206 05:33:58.729577 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnnpb\" (UniqueName: \"kubernetes.io/projected/08a3bae1-4328-437e-a2b2-724888186960-kube-api-access-rnnpb\") on node \"crc\" DevicePath \"\"" Dec 06 05:33:58 crc kubenswrapper[4706]: I1206 05:33:58.782942 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/08a3bae1-4328-437e-a2b2-724888186960-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "08a3bae1-4328-437e-a2b2-724888186960" (UID: "08a3bae1-4328-437e-a2b2-724888186960"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:33:58 crc kubenswrapper[4706]: I1206 05:33:58.830147 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/08a3bae1-4328-437e-a2b2-724888186960-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:33:59 crc kubenswrapper[4706]: I1206 05:33:59.448688 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4c8sv" event={"ID":"08a3bae1-4328-437e-a2b2-724888186960","Type":"ContainerDied","Data":"f2acf4986778f2efb3bc721fc96c31074bde5247ecd9e7190250ed47a13adcc6"} Dec 06 05:33:59 crc kubenswrapper[4706]: I1206 05:33:59.448782 4706 scope.go:117] "RemoveContainer" containerID="1ca3ced2874a0d33ba9f2c5ce711c9af6ccf92a1d3c5a7173b4c8d0740b9c2a4" Dec 06 05:33:59 crc kubenswrapper[4706]: I1206 05:33:59.449140 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4c8sv" Dec 06 05:33:59 crc kubenswrapper[4706]: I1206 05:33:59.484475 4706 scope.go:117] "RemoveContainer" containerID="85d3e85f194da097beff9a27b2aa1a6cba1fc00347c399dd76dedfb7329c9470" Dec 06 05:33:59 crc kubenswrapper[4706]: I1206 05:33:59.493352 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4c8sv"] Dec 06 05:33:59 crc kubenswrapper[4706]: I1206 05:33:59.502769 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4c8sv"] Dec 06 05:33:59 crc kubenswrapper[4706]: I1206 05:33:59.508339 4706 scope.go:117] "RemoveContainer" containerID="bc2584e06289f99003a14d36ede2d6acbf4e05dca07475db9db9a094daa3a7c2" Dec 06 05:34:00 crc kubenswrapper[4706]: I1206 05:34:00.043815 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="08a3bae1-4328-437e-a2b2-724888186960" path="/var/lib/kubelet/pods/08a3bae1-4328-437e-a2b2-724888186960/volumes" Dec 06 05:34:05 crc kubenswrapper[4706]: I1206 05:34:05.961915 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:34:05 crc kubenswrapper[4706]: I1206 05:34:05.962429 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:34:05 crc kubenswrapper[4706]: I1206 05:34:05.962534 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:34:05 crc kubenswrapper[4706]: I1206 05:34:05.963535 4706 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d351fc246d2774fcded6a1058eb8824f36d694019b784e663bb46cc68f90094f"} pod="openshift-machine-config-operator/machine-config-daemon-z27rn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 06 05:34:05 crc kubenswrapper[4706]: I1206 05:34:05.963646 4706 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" containerID="cri-o://d351fc246d2774fcded6a1058eb8824f36d694019b784e663bb46cc68f90094f" gracePeriod=600 Dec 06 05:34:06 crc kubenswrapper[4706]: I1206 05:34:06.491407 4706 generic.go:334] "Generic (PLEG): container finished" podID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerID="d351fc246d2774fcded6a1058eb8824f36d694019b784e663bb46cc68f90094f" exitCode=0 Dec 06 05:34:06 crc kubenswrapper[4706]: I1206 05:34:06.491456 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerDied","Data":"d351fc246d2774fcded6a1058eb8824f36d694019b784e663bb46cc68f90094f"} Dec 06 05:34:06 crc kubenswrapper[4706]: I1206 05:34:06.491493 4706 scope.go:117] "RemoveContainer" containerID="d41cbbb0ceb6ccc8501ce4b75011f83163d456684ff13944b7d6b7c128f476e3" Dec 06 05:34:07 crc kubenswrapper[4706]: I1206 05:34:07.500696 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"5837ae2ad3340b198002bcadcaff039fe17103dc504dd99a597185b1f1d89acf"} Dec 06 05:36:35 crc kubenswrapper[4706]: I1206 05:36:35.961686 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:36:35 crc kubenswrapper[4706]: I1206 05:36:35.962406 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:36:41 crc kubenswrapper[4706]: I1206 05:36:41.664005 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-x967w"] Dec 06 05:36:41 crc kubenswrapper[4706]: E1206 05:36:41.664871 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08a3bae1-4328-437e-a2b2-724888186960" containerName="registry-server" Dec 06 05:36:41 crc kubenswrapper[4706]: I1206 05:36:41.664900 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="08a3bae1-4328-437e-a2b2-724888186960" containerName="registry-server" Dec 06 05:36:41 crc kubenswrapper[4706]: E1206 05:36:41.664940 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08a3bae1-4328-437e-a2b2-724888186960" containerName="extract-content" Dec 06 05:36:41 crc kubenswrapper[4706]: I1206 05:36:41.664956 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="08a3bae1-4328-437e-a2b2-724888186960" containerName="extract-content" Dec 06 05:36:41 crc kubenswrapper[4706]: E1206 05:36:41.664974 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="08a3bae1-4328-437e-a2b2-724888186960" containerName="extract-utilities" Dec 06 05:36:41 crc kubenswrapper[4706]: I1206 05:36:41.664993 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="08a3bae1-4328-437e-a2b2-724888186960" containerName="extract-utilities" Dec 06 05:36:41 crc kubenswrapper[4706]: I1206 05:36:41.665259 4706 
memory_manager.go:354] "RemoveStaleState removing state" podUID="08a3bae1-4328-437e-a2b2-724888186960" containerName="registry-server" Dec 06 05:36:41 crc kubenswrapper[4706]: I1206 05:36:41.667209 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x967w" Dec 06 05:36:41 crc kubenswrapper[4706]: I1206 05:36:41.680034 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2770752-dad4-4afa-80f1-e15bc37b2760-utilities\") pod \"redhat-operators-x967w\" (UID: \"e2770752-dad4-4afa-80f1-e15bc37b2760\") " pod="openshift-marketplace/redhat-operators-x967w" Dec 06 05:36:41 crc kubenswrapper[4706]: I1206 05:36:41.680154 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2770752-dad4-4afa-80f1-e15bc37b2760-catalog-content\") pod \"redhat-operators-x967w\" (UID: \"e2770752-dad4-4afa-80f1-e15bc37b2760\") " pod="openshift-marketplace/redhat-operators-x967w" Dec 06 05:36:41 crc kubenswrapper[4706]: I1206 05:36:41.680195 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fq5z\" (UniqueName: \"kubernetes.io/projected/e2770752-dad4-4afa-80f1-e15bc37b2760-kube-api-access-8fq5z\") pod \"redhat-operators-x967w\" (UID: \"e2770752-dad4-4afa-80f1-e15bc37b2760\") " pod="openshift-marketplace/redhat-operators-x967w" Dec 06 05:36:41 crc kubenswrapper[4706]: I1206 05:36:41.686805 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-x967w"] Dec 06 05:36:41 crc kubenswrapper[4706]: I1206 05:36:41.780749 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2770752-dad4-4afa-80f1-e15bc37b2760-utilities\") pod \"redhat-operators-x967w\" (UID: \"e2770752-dad4-4afa-80f1-e15bc37b2760\") " pod="openshift-marketplace/redhat-operators-x967w" Dec 06 05:36:41 crc kubenswrapper[4706]: I1206 05:36:41.781125 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2770752-dad4-4afa-80f1-e15bc37b2760-catalog-content\") pod \"redhat-operators-x967w\" (UID: \"e2770752-dad4-4afa-80f1-e15bc37b2760\") " pod="openshift-marketplace/redhat-operators-x967w" Dec 06 05:36:41 crc kubenswrapper[4706]: I1206 05:36:41.781156 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fq5z\" (UniqueName: \"kubernetes.io/projected/e2770752-dad4-4afa-80f1-e15bc37b2760-kube-api-access-8fq5z\") pod \"redhat-operators-x967w\" (UID: \"e2770752-dad4-4afa-80f1-e15bc37b2760\") " pod="openshift-marketplace/redhat-operators-x967w" Dec 06 05:36:41 crc kubenswrapper[4706]: I1206 05:36:41.781421 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2770752-dad4-4afa-80f1-e15bc37b2760-utilities\") pod \"redhat-operators-x967w\" (UID: \"e2770752-dad4-4afa-80f1-e15bc37b2760\") " pod="openshift-marketplace/redhat-operators-x967w" Dec 06 05:36:41 crc kubenswrapper[4706]: I1206 05:36:41.781634 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2770752-dad4-4afa-80f1-e15bc37b2760-catalog-content\") pod \"redhat-operators-x967w\" 
(UID: \"e2770752-dad4-4afa-80f1-e15bc37b2760\") " pod="openshift-marketplace/redhat-operators-x967w" Dec 06 05:36:41 crc kubenswrapper[4706]: I1206 05:36:41.804779 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fq5z\" (UniqueName: \"kubernetes.io/projected/e2770752-dad4-4afa-80f1-e15bc37b2760-kube-api-access-8fq5z\") pod \"redhat-operators-x967w\" (UID: \"e2770752-dad4-4afa-80f1-e15bc37b2760\") " pod="openshift-marketplace/redhat-operators-x967w" Dec 06 05:36:42 crc kubenswrapper[4706]: I1206 05:36:42.030765 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x967w" Dec 06 05:36:42 crc kubenswrapper[4706]: I1206 05:36:42.416878 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-x967w"] Dec 06 05:36:42 crc kubenswrapper[4706]: I1206 05:36:42.501223 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x967w" event={"ID":"e2770752-dad4-4afa-80f1-e15bc37b2760","Type":"ContainerStarted","Data":"63a18ff808450686ef3f13973f3d40a07b4bc8e929f58da146f62d149af6aee3"} Dec 06 05:36:43 crc kubenswrapper[4706]: I1206 05:36:43.511642 4706 generic.go:334] "Generic (PLEG): container finished" podID="e2770752-dad4-4afa-80f1-e15bc37b2760" containerID="9b9589fee4ef41db2ccbb14c62da91f354cecad35c2e3ad38eb7b76a069a0f87" exitCode=0 Dec 06 05:36:43 crc kubenswrapper[4706]: I1206 05:36:43.511705 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x967w" event={"ID":"e2770752-dad4-4afa-80f1-e15bc37b2760","Type":"ContainerDied","Data":"9b9589fee4ef41db2ccbb14c62da91f354cecad35c2e3ad38eb7b76a069a0f87"} Dec 06 05:36:44 crc kubenswrapper[4706]: I1206 05:36:44.519768 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x967w" event={"ID":"e2770752-dad4-4afa-80f1-e15bc37b2760","Type":"ContainerStarted","Data":"60a7bcfc295c406df3bc7894ccfaff14b4f37faa1dc01151c604782f6116eba7"} Dec 06 05:36:45 crc kubenswrapper[4706]: I1206 05:36:45.528915 4706 generic.go:334] "Generic (PLEG): container finished" podID="e2770752-dad4-4afa-80f1-e15bc37b2760" containerID="60a7bcfc295c406df3bc7894ccfaff14b4f37faa1dc01151c604782f6116eba7" exitCode=0 Dec 06 05:36:45 crc kubenswrapper[4706]: I1206 05:36:45.528983 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x967w" event={"ID":"e2770752-dad4-4afa-80f1-e15bc37b2760","Type":"ContainerDied","Data":"60a7bcfc295c406df3bc7894ccfaff14b4f37faa1dc01151c604782f6116eba7"} Dec 06 05:36:46 crc kubenswrapper[4706]: I1206 05:36:46.537608 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x967w" event={"ID":"e2770752-dad4-4afa-80f1-e15bc37b2760","Type":"ContainerStarted","Data":"15bd2e1ddb0bdc57ac7042ff0fb71688f30f06275e5d25a74ae6bad32b54b27c"} Dec 06 05:36:46 crc kubenswrapper[4706]: I1206 05:36:46.564242 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-x967w" podStartSLOduration=3.145764861 podStartE2EDuration="5.564223154s" podCreationTimestamp="2025-12-06 05:36:41 +0000 UTC" firstStartedPulling="2025-12-06 05:36:43.515490071 +0000 UTC m=+1025.843314055" lastFinishedPulling="2025-12-06 05:36:45.933948384 +0000 UTC m=+1028.261772348" observedRunningTime="2025-12-06 05:36:46.560954256 +0000 UTC m=+1028.888778220" watchObservedRunningTime="2025-12-06 
05:36:46.564223154 +0000 UTC m=+1028.892047108" Dec 06 05:36:52 crc kubenswrapper[4706]: I1206 05:36:52.030982 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-x967w" Dec 06 05:36:52 crc kubenswrapper[4706]: I1206 05:36:52.031434 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-x967w" Dec 06 05:36:53 crc kubenswrapper[4706]: I1206 05:36:53.054114 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lfqv5"] Dec 06 05:36:53 crc kubenswrapper[4706]: I1206 05:36:53.055441 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lfqv5" Dec 06 05:36:53 crc kubenswrapper[4706]: I1206 05:36:53.063702 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lfqv5"] Dec 06 05:36:53 crc kubenswrapper[4706]: I1206 05:36:53.093540 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-x967w" podUID="e2770752-dad4-4afa-80f1-e15bc37b2760" containerName="registry-server" probeResult="failure" output=< Dec 06 05:36:53 crc kubenswrapper[4706]: timeout: failed to connect service ":50051" within 1s Dec 06 05:36:53 crc kubenswrapper[4706]: > Dec 06 05:36:53 crc kubenswrapper[4706]: I1206 05:36:53.231553 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cea174e2-599d-4af2-a2cd-a2614899d99d-catalog-content\") pod \"redhat-marketplace-lfqv5\" (UID: \"cea174e2-599d-4af2-a2cd-a2614899d99d\") " pod="openshift-marketplace/redhat-marketplace-lfqv5" Dec 06 05:36:53 crc kubenswrapper[4706]: I1206 05:36:53.231639 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f78pv\" (UniqueName: \"kubernetes.io/projected/cea174e2-599d-4af2-a2cd-a2614899d99d-kube-api-access-f78pv\") pod \"redhat-marketplace-lfqv5\" (UID: \"cea174e2-599d-4af2-a2cd-a2614899d99d\") " pod="openshift-marketplace/redhat-marketplace-lfqv5" Dec 06 05:36:53 crc kubenswrapper[4706]: I1206 05:36:53.231678 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cea174e2-599d-4af2-a2cd-a2614899d99d-utilities\") pod \"redhat-marketplace-lfqv5\" (UID: \"cea174e2-599d-4af2-a2cd-a2614899d99d\") " pod="openshift-marketplace/redhat-marketplace-lfqv5" Dec 06 05:36:53 crc kubenswrapper[4706]: I1206 05:36:53.332623 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cea174e2-599d-4af2-a2cd-a2614899d99d-utilities\") pod \"redhat-marketplace-lfqv5\" (UID: \"cea174e2-599d-4af2-a2cd-a2614899d99d\") " pod="openshift-marketplace/redhat-marketplace-lfqv5" Dec 06 05:36:53 crc kubenswrapper[4706]: I1206 05:36:53.332697 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cea174e2-599d-4af2-a2cd-a2614899d99d-catalog-content\") pod \"redhat-marketplace-lfqv5\" (UID: \"cea174e2-599d-4af2-a2cd-a2614899d99d\") " pod="openshift-marketplace/redhat-marketplace-lfqv5" Dec 06 05:36:53 crc kubenswrapper[4706]: I1206 05:36:53.332773 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-f78pv\" (UniqueName: \"kubernetes.io/projected/cea174e2-599d-4af2-a2cd-a2614899d99d-kube-api-access-f78pv\") pod \"redhat-marketplace-lfqv5\" (UID: \"cea174e2-599d-4af2-a2cd-a2614899d99d\") " pod="openshift-marketplace/redhat-marketplace-lfqv5" Dec 06 05:36:53 crc kubenswrapper[4706]: I1206 05:36:53.333093 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cea174e2-599d-4af2-a2cd-a2614899d99d-utilities\") pod \"redhat-marketplace-lfqv5\" (UID: \"cea174e2-599d-4af2-a2cd-a2614899d99d\") " pod="openshift-marketplace/redhat-marketplace-lfqv5" Dec 06 05:36:53 crc kubenswrapper[4706]: I1206 05:36:53.333170 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cea174e2-599d-4af2-a2cd-a2614899d99d-catalog-content\") pod \"redhat-marketplace-lfqv5\" (UID: \"cea174e2-599d-4af2-a2cd-a2614899d99d\") " pod="openshift-marketplace/redhat-marketplace-lfqv5" Dec 06 05:36:53 crc kubenswrapper[4706]: I1206 05:36:53.355892 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f78pv\" (UniqueName: \"kubernetes.io/projected/cea174e2-599d-4af2-a2cd-a2614899d99d-kube-api-access-f78pv\") pod \"redhat-marketplace-lfqv5\" (UID: \"cea174e2-599d-4af2-a2cd-a2614899d99d\") " pod="openshift-marketplace/redhat-marketplace-lfqv5" Dec 06 05:36:53 crc kubenswrapper[4706]: I1206 05:36:53.375561 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lfqv5" Dec 06 05:36:53 crc kubenswrapper[4706]: I1206 05:36:53.792700 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lfqv5"] Dec 06 05:36:54 crc kubenswrapper[4706]: I1206 05:36:54.607079 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lfqv5" event={"ID":"cea174e2-599d-4af2-a2cd-a2614899d99d","Type":"ContainerStarted","Data":"6cbd77b0e12a1e409b70bc8c2cd59776a7f88616fa824867573f29c696c081cc"} Dec 06 05:36:56 crc kubenswrapper[4706]: I1206 05:36:56.617979 4706 generic.go:334] "Generic (PLEG): container finished" podID="cea174e2-599d-4af2-a2cd-a2614899d99d" containerID="4bc39cbee7d2df976265b207df6921b7de9e9fa0c13e1930c882c42fb9954e80" exitCode=0 Dec 06 05:36:56 crc kubenswrapper[4706]: I1206 05:36:56.618091 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lfqv5" event={"ID":"cea174e2-599d-4af2-a2cd-a2614899d99d","Type":"ContainerDied","Data":"4bc39cbee7d2df976265b207df6921b7de9e9fa0c13e1930c882c42fb9954e80"} Dec 06 05:36:57 crc kubenswrapper[4706]: I1206 05:36:57.626203 4706 generic.go:334] "Generic (PLEG): container finished" podID="cea174e2-599d-4af2-a2cd-a2614899d99d" containerID="212f498d029058d9f9f59b11b17b7707acc2bfeaaec4646d15f27beb373fea8d" exitCode=0 Dec 06 05:36:57 crc kubenswrapper[4706]: I1206 05:36:57.626355 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lfqv5" event={"ID":"cea174e2-599d-4af2-a2cd-a2614899d99d","Type":"ContainerDied","Data":"212f498d029058d9f9f59b11b17b7707acc2bfeaaec4646d15f27beb373fea8d"} Dec 06 05:36:58 crc kubenswrapper[4706]: I1206 05:36:58.634034 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lfqv5" 
event={"ID":"cea174e2-599d-4af2-a2cd-a2614899d99d","Type":"ContainerStarted","Data":"7dd3bfd26118a80b96e98da21f98bb5a2828dd4a01457bad15e3b73730d76c11"} Dec 06 05:37:02 crc kubenswrapper[4706]: I1206 05:37:02.088818 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-x967w" Dec 06 05:37:02 crc kubenswrapper[4706]: I1206 05:37:02.109374 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lfqv5" podStartSLOduration=7.548875789 podStartE2EDuration="9.109351455s" podCreationTimestamp="2025-12-06 05:36:53 +0000 UTC" firstStartedPulling="2025-12-06 05:36:56.620315869 +0000 UTC m=+1038.948139823" lastFinishedPulling="2025-12-06 05:36:58.180791505 +0000 UTC m=+1040.508615489" observedRunningTime="2025-12-06 05:36:58.656466776 +0000 UTC m=+1040.984290780" watchObservedRunningTime="2025-12-06 05:37:02.109351455 +0000 UTC m=+1044.437175419" Dec 06 05:37:02 crc kubenswrapper[4706]: I1206 05:37:02.133687 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-x967w" Dec 06 05:37:02 crc kubenswrapper[4706]: I1206 05:37:02.323012 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-x967w"] Dec 06 05:37:03 crc kubenswrapper[4706]: I1206 05:37:03.376018 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lfqv5" Dec 06 05:37:03 crc kubenswrapper[4706]: I1206 05:37:03.376095 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lfqv5" Dec 06 05:37:03 crc kubenswrapper[4706]: I1206 05:37:03.414652 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lfqv5" Dec 06 05:37:03 crc kubenswrapper[4706]: I1206 05:37:03.666983 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-x967w" podUID="e2770752-dad4-4afa-80f1-e15bc37b2760" containerName="registry-server" containerID="cri-o://15bd2e1ddb0bdc57ac7042ff0fb71688f30f06275e5d25a74ae6bad32b54b27c" gracePeriod=2 Dec 06 05:37:03 crc kubenswrapper[4706]: I1206 05:37:03.706713 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lfqv5" Dec 06 05:37:04 crc kubenswrapper[4706]: I1206 05:37:04.725936 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lfqv5"] Dec 06 05:37:05 crc kubenswrapper[4706]: I1206 05:37:05.682556 4706 generic.go:334] "Generic (PLEG): container finished" podID="e2770752-dad4-4afa-80f1-e15bc37b2760" containerID="15bd2e1ddb0bdc57ac7042ff0fb71688f30f06275e5d25a74ae6bad32b54b27c" exitCode=0 Dec 06 05:37:05 crc kubenswrapper[4706]: I1206 05:37:05.683115 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lfqv5" podUID="cea174e2-599d-4af2-a2cd-a2614899d99d" containerName="registry-server" containerID="cri-o://7dd3bfd26118a80b96e98da21f98bb5a2828dd4a01457bad15e3b73730d76c11" gracePeriod=2 Dec 06 05:37:05 crc kubenswrapper[4706]: I1206 05:37:05.682759 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x967w" 
event={"ID":"e2770752-dad4-4afa-80f1-e15bc37b2760","Type":"ContainerDied","Data":"15bd2e1ddb0bdc57ac7042ff0fb71688f30f06275e5d25a74ae6bad32b54b27c"} Dec 06 05:37:05 crc kubenswrapper[4706]: I1206 05:37:05.816416 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x967w" Dec 06 05:37:05 crc kubenswrapper[4706]: I1206 05:37:05.895413 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2770752-dad4-4afa-80f1-e15bc37b2760-catalog-content\") pod \"e2770752-dad4-4afa-80f1-e15bc37b2760\" (UID: \"e2770752-dad4-4afa-80f1-e15bc37b2760\") " Dec 06 05:37:05 crc kubenswrapper[4706]: I1206 05:37:05.895497 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8fq5z\" (UniqueName: \"kubernetes.io/projected/e2770752-dad4-4afa-80f1-e15bc37b2760-kube-api-access-8fq5z\") pod \"e2770752-dad4-4afa-80f1-e15bc37b2760\" (UID: \"e2770752-dad4-4afa-80f1-e15bc37b2760\") " Dec 06 05:37:05 crc kubenswrapper[4706]: I1206 05:37:05.895519 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2770752-dad4-4afa-80f1-e15bc37b2760-utilities\") pod \"e2770752-dad4-4afa-80f1-e15bc37b2760\" (UID: \"e2770752-dad4-4afa-80f1-e15bc37b2760\") " Dec 06 05:37:05 crc kubenswrapper[4706]: I1206 05:37:05.896458 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2770752-dad4-4afa-80f1-e15bc37b2760-utilities" (OuterVolumeSpecName: "utilities") pod "e2770752-dad4-4afa-80f1-e15bc37b2760" (UID: "e2770752-dad4-4afa-80f1-e15bc37b2760"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:37:05 crc kubenswrapper[4706]: I1206 05:37:05.902488 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2770752-dad4-4afa-80f1-e15bc37b2760-kube-api-access-8fq5z" (OuterVolumeSpecName: "kube-api-access-8fq5z") pod "e2770752-dad4-4afa-80f1-e15bc37b2760" (UID: "e2770752-dad4-4afa-80f1-e15bc37b2760"). InnerVolumeSpecName "kube-api-access-8fq5z". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:37:05 crc kubenswrapper[4706]: I1206 05:37:05.961962 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:37:05 crc kubenswrapper[4706]: I1206 05:37:05.962203 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:37:05 crc kubenswrapper[4706]: I1206 05:37:05.993003 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e2770752-dad4-4afa-80f1-e15bc37b2760-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e2770752-dad4-4afa-80f1-e15bc37b2760" (UID: "e2770752-dad4-4afa-80f1-e15bc37b2760"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:37:05 crc kubenswrapper[4706]: I1206 05:37:05.998561 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2770752-dad4-4afa-80f1-e15bc37b2760-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:37:05 crc kubenswrapper[4706]: I1206 05:37:05.998611 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8fq5z\" (UniqueName: \"kubernetes.io/projected/e2770752-dad4-4afa-80f1-e15bc37b2760-kube-api-access-8fq5z\") on node \"crc\" DevicePath \"\"" Dec 06 05:37:05 crc kubenswrapper[4706]: I1206 05:37:05.998634 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2770752-dad4-4afa-80f1-e15bc37b2760-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.443834 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lfqv5" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.505782 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cea174e2-599d-4af2-a2cd-a2614899d99d-catalog-content\") pod \"cea174e2-599d-4af2-a2cd-a2614899d99d\" (UID: \"cea174e2-599d-4af2-a2cd-a2614899d99d\") " Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.505870 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f78pv\" (UniqueName: \"kubernetes.io/projected/cea174e2-599d-4af2-a2cd-a2614899d99d-kube-api-access-f78pv\") pod \"cea174e2-599d-4af2-a2cd-a2614899d99d\" (UID: \"cea174e2-599d-4af2-a2cd-a2614899d99d\") " Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.505981 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cea174e2-599d-4af2-a2cd-a2614899d99d-utilities\") pod \"cea174e2-599d-4af2-a2cd-a2614899d99d\" (UID: \"cea174e2-599d-4af2-a2cd-a2614899d99d\") " Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.507757 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cea174e2-599d-4af2-a2cd-a2614899d99d-utilities" (OuterVolumeSpecName: "utilities") pod "cea174e2-599d-4af2-a2cd-a2614899d99d" (UID: "cea174e2-599d-4af2-a2cd-a2614899d99d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.511032 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cea174e2-599d-4af2-a2cd-a2614899d99d-kube-api-access-f78pv" (OuterVolumeSpecName: "kube-api-access-f78pv") pod "cea174e2-599d-4af2-a2cd-a2614899d99d" (UID: "cea174e2-599d-4af2-a2cd-a2614899d99d"). InnerVolumeSpecName "kube-api-access-f78pv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.525374 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cea174e2-599d-4af2-a2cd-a2614899d99d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cea174e2-599d-4af2-a2cd-a2614899d99d" (UID: "cea174e2-599d-4af2-a2cd-a2614899d99d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.607855 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cea174e2-599d-4af2-a2cd-a2614899d99d-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.608089 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cea174e2-599d-4af2-a2cd-a2614899d99d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.608151 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f78pv\" (UniqueName: \"kubernetes.io/projected/cea174e2-599d-4af2-a2cd-a2614899d99d-kube-api-access-f78pv\") on node \"crc\" DevicePath \"\"" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.691265 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-x967w" event={"ID":"e2770752-dad4-4afa-80f1-e15bc37b2760","Type":"ContainerDied","Data":"63a18ff808450686ef3f13973f3d40a07b4bc8e929f58da146f62d149af6aee3"} Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.691446 4706 scope.go:117] "RemoveContainer" containerID="15bd2e1ddb0bdc57ac7042ff0fb71688f30f06275e5d25a74ae6bad32b54b27c" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.691750 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-x967w" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.694151 4706 generic.go:334] "Generic (PLEG): container finished" podID="cea174e2-599d-4af2-a2cd-a2614899d99d" containerID="7dd3bfd26118a80b96e98da21f98bb5a2828dd4a01457bad15e3b73730d76c11" exitCode=0 Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.694191 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lfqv5" event={"ID":"cea174e2-599d-4af2-a2cd-a2614899d99d","Type":"ContainerDied","Data":"7dd3bfd26118a80b96e98da21f98bb5a2828dd4a01457bad15e3b73730d76c11"} Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.694221 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lfqv5" event={"ID":"cea174e2-599d-4af2-a2cd-a2614899d99d","Type":"ContainerDied","Data":"6cbd77b0e12a1e409b70bc8c2cd59776a7f88616fa824867573f29c696c081cc"} Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.694297 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lfqv5" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.715416 4706 scope.go:117] "RemoveContainer" containerID="60a7bcfc295c406df3bc7894ccfaff14b4f37faa1dc01151c604782f6116eba7" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.723253 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-x967w"] Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.731909 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-x967w"] Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.738723 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lfqv5"] Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.742089 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lfqv5"] Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.745490 4706 scope.go:117] "RemoveContainer" containerID="9b9589fee4ef41db2ccbb14c62da91f354cecad35c2e3ad38eb7b76a069a0f87" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.757361 4706 scope.go:117] "RemoveContainer" containerID="7dd3bfd26118a80b96e98da21f98bb5a2828dd4a01457bad15e3b73730d76c11" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.773310 4706 scope.go:117] "RemoveContainer" containerID="212f498d029058d9f9f59b11b17b7707acc2bfeaaec4646d15f27beb373fea8d" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.810679 4706 scope.go:117] "RemoveContainer" containerID="4bc39cbee7d2df976265b207df6921b7de9e9fa0c13e1930c882c42fb9954e80" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.822010 4706 scope.go:117] "RemoveContainer" containerID="7dd3bfd26118a80b96e98da21f98bb5a2828dd4a01457bad15e3b73730d76c11" Dec 06 05:37:06 crc kubenswrapper[4706]: E1206 05:37:06.823309 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7dd3bfd26118a80b96e98da21f98bb5a2828dd4a01457bad15e3b73730d76c11\": container with ID starting with 7dd3bfd26118a80b96e98da21f98bb5a2828dd4a01457bad15e3b73730d76c11 not found: ID does not exist" containerID="7dd3bfd26118a80b96e98da21f98bb5a2828dd4a01457bad15e3b73730d76c11" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.823376 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7dd3bfd26118a80b96e98da21f98bb5a2828dd4a01457bad15e3b73730d76c11"} err="failed to get container status \"7dd3bfd26118a80b96e98da21f98bb5a2828dd4a01457bad15e3b73730d76c11\": rpc error: code = NotFound desc = could not find container \"7dd3bfd26118a80b96e98da21f98bb5a2828dd4a01457bad15e3b73730d76c11\": container with ID starting with 7dd3bfd26118a80b96e98da21f98bb5a2828dd4a01457bad15e3b73730d76c11 not found: ID does not exist" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.823415 4706 scope.go:117] "RemoveContainer" containerID="212f498d029058d9f9f59b11b17b7707acc2bfeaaec4646d15f27beb373fea8d" Dec 06 05:37:06 crc kubenswrapper[4706]: E1206 05:37:06.823891 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"212f498d029058d9f9f59b11b17b7707acc2bfeaaec4646d15f27beb373fea8d\": container with ID starting with 212f498d029058d9f9f59b11b17b7707acc2bfeaaec4646d15f27beb373fea8d not found: ID does not exist" containerID="212f498d029058d9f9f59b11b17b7707acc2bfeaaec4646d15f27beb373fea8d" Dec 06 05:37:06 
crc kubenswrapper[4706]: I1206 05:37:06.823939 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"212f498d029058d9f9f59b11b17b7707acc2bfeaaec4646d15f27beb373fea8d"} err="failed to get container status \"212f498d029058d9f9f59b11b17b7707acc2bfeaaec4646d15f27beb373fea8d\": rpc error: code = NotFound desc = could not find container \"212f498d029058d9f9f59b11b17b7707acc2bfeaaec4646d15f27beb373fea8d\": container with ID starting with 212f498d029058d9f9f59b11b17b7707acc2bfeaaec4646d15f27beb373fea8d not found: ID does not exist" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.823963 4706 scope.go:117] "RemoveContainer" containerID="4bc39cbee7d2df976265b207df6921b7de9e9fa0c13e1930c882c42fb9954e80" Dec 06 05:37:06 crc kubenswrapper[4706]: E1206 05:37:06.824498 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bc39cbee7d2df976265b207df6921b7de9e9fa0c13e1930c882c42fb9954e80\": container with ID starting with 4bc39cbee7d2df976265b207df6921b7de9e9fa0c13e1930c882c42fb9954e80 not found: ID does not exist" containerID="4bc39cbee7d2df976265b207df6921b7de9e9fa0c13e1930c882c42fb9954e80" Dec 06 05:37:06 crc kubenswrapper[4706]: I1206 05:37:06.824550 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bc39cbee7d2df976265b207df6921b7de9e9fa0c13e1930c882c42fb9954e80"} err="failed to get container status \"4bc39cbee7d2df976265b207df6921b7de9e9fa0c13e1930c882c42fb9954e80\": rpc error: code = NotFound desc = could not find container \"4bc39cbee7d2df976265b207df6921b7de9e9fa0c13e1930c882c42fb9954e80\": container with ID starting with 4bc39cbee7d2df976265b207df6921b7de9e9fa0c13e1930c882c42fb9954e80 not found: ID does not exist" Dec 06 05:37:08 crc kubenswrapper[4706]: I1206 05:37:08.046892 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cea174e2-599d-4af2-a2cd-a2614899d99d" path="/var/lib/kubelet/pods/cea174e2-599d-4af2-a2cd-a2614899d99d/volumes" Dec 06 05:37:08 crc kubenswrapper[4706]: I1206 05:37:08.048396 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2770752-dad4-4afa-80f1-e15bc37b2760" path="/var/lib/kubelet/pods/e2770752-dad4-4afa-80f1-e15bc37b2760/volumes" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.359135 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6k6sm"] Dec 06 05:37:28 crc kubenswrapper[4706]: E1206 05:37:28.360005 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cea174e2-599d-4af2-a2cd-a2614899d99d" containerName="extract-content" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.360023 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="cea174e2-599d-4af2-a2cd-a2614899d99d" containerName="extract-content" Dec 06 05:37:28 crc kubenswrapper[4706]: E1206 05:37:28.360068 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cea174e2-599d-4af2-a2cd-a2614899d99d" containerName="extract-utilities" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.360082 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="cea174e2-599d-4af2-a2cd-a2614899d99d" containerName="extract-utilities" Dec 06 05:37:28 crc kubenswrapper[4706]: E1206 05:37:28.360099 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cea174e2-599d-4af2-a2cd-a2614899d99d" containerName="registry-server" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.360109 4706 
state_mem.go:107] "Deleted CPUSet assignment" podUID="cea174e2-599d-4af2-a2cd-a2614899d99d" containerName="registry-server" Dec 06 05:37:28 crc kubenswrapper[4706]: E1206 05:37:28.360134 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2770752-dad4-4afa-80f1-e15bc37b2760" containerName="registry-server" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.360142 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2770752-dad4-4afa-80f1-e15bc37b2760" containerName="registry-server" Dec 06 05:37:28 crc kubenswrapper[4706]: E1206 05:37:28.360153 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2770752-dad4-4afa-80f1-e15bc37b2760" containerName="extract-content" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.360161 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2770752-dad4-4afa-80f1-e15bc37b2760" containerName="extract-content" Dec 06 05:37:28 crc kubenswrapper[4706]: E1206 05:37:28.360174 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2770752-dad4-4afa-80f1-e15bc37b2760" containerName="extract-utilities" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.360182 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2770752-dad4-4afa-80f1-e15bc37b2760" containerName="extract-utilities" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.360301 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="cea174e2-599d-4af2-a2cd-a2614899d99d" containerName="registry-server" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.360312 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2770752-dad4-4afa-80f1-e15bc37b2760" containerName="registry-server" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.361442 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6k6sm" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.372020 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6k6sm"] Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.493543 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa8f7b7c-ef9a-4856-b1ec-5623645d338f-utilities\") pod \"certified-operators-6k6sm\" (UID: \"aa8f7b7c-ef9a-4856-b1ec-5623645d338f\") " pod="openshift-marketplace/certified-operators-6k6sm" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.493604 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8m972\" (UniqueName: \"kubernetes.io/projected/aa8f7b7c-ef9a-4856-b1ec-5623645d338f-kube-api-access-8m972\") pod \"certified-operators-6k6sm\" (UID: \"aa8f7b7c-ef9a-4856-b1ec-5623645d338f\") " pod="openshift-marketplace/certified-operators-6k6sm" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.493660 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa8f7b7c-ef9a-4856-b1ec-5623645d338f-catalog-content\") pod \"certified-operators-6k6sm\" (UID: \"aa8f7b7c-ef9a-4856-b1ec-5623645d338f\") " pod="openshift-marketplace/certified-operators-6k6sm" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.595262 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa8f7b7c-ef9a-4856-b1ec-5623645d338f-utilities\") pod \"certified-operators-6k6sm\" (UID: \"aa8f7b7c-ef9a-4856-b1ec-5623645d338f\") " pod="openshift-marketplace/certified-operators-6k6sm" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.595326 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8m972\" (UniqueName: \"kubernetes.io/projected/aa8f7b7c-ef9a-4856-b1ec-5623645d338f-kube-api-access-8m972\") pod \"certified-operators-6k6sm\" (UID: \"aa8f7b7c-ef9a-4856-b1ec-5623645d338f\") " pod="openshift-marketplace/certified-operators-6k6sm" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.595369 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa8f7b7c-ef9a-4856-b1ec-5623645d338f-catalog-content\") pod \"certified-operators-6k6sm\" (UID: \"aa8f7b7c-ef9a-4856-b1ec-5623645d338f\") " pod="openshift-marketplace/certified-operators-6k6sm" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.595814 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa8f7b7c-ef9a-4856-b1ec-5623645d338f-utilities\") pod \"certified-operators-6k6sm\" (UID: \"aa8f7b7c-ef9a-4856-b1ec-5623645d338f\") " pod="openshift-marketplace/certified-operators-6k6sm" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.595838 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa8f7b7c-ef9a-4856-b1ec-5623645d338f-catalog-content\") pod \"certified-operators-6k6sm\" (UID: \"aa8f7b7c-ef9a-4856-b1ec-5623645d338f\") " pod="openshift-marketplace/certified-operators-6k6sm" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.627762 4706 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-8m972\" (UniqueName: \"kubernetes.io/projected/aa8f7b7c-ef9a-4856-b1ec-5623645d338f-kube-api-access-8m972\") pod \"certified-operators-6k6sm\" (UID: \"aa8f7b7c-ef9a-4856-b1ec-5623645d338f\") " pod="openshift-marketplace/certified-operators-6k6sm" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.693443 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6k6sm" Dec 06 05:37:28 crc kubenswrapper[4706]: I1206 05:37:28.895702 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6k6sm"] Dec 06 05:37:29 crc kubenswrapper[4706]: I1206 05:37:29.826664 4706 generic.go:334] "Generic (PLEG): container finished" podID="aa8f7b7c-ef9a-4856-b1ec-5623645d338f" containerID="7172a68ce9777b8feb8cb588662ad9f4472b70c941e94b20acebbfe451e1a79c" exitCode=0 Dec 06 05:37:29 crc kubenswrapper[4706]: I1206 05:37:29.826782 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6k6sm" event={"ID":"aa8f7b7c-ef9a-4856-b1ec-5623645d338f","Type":"ContainerDied","Data":"7172a68ce9777b8feb8cb588662ad9f4472b70c941e94b20acebbfe451e1a79c"} Dec 06 05:37:29 crc kubenswrapper[4706]: I1206 05:37:29.827094 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6k6sm" event={"ID":"aa8f7b7c-ef9a-4856-b1ec-5623645d338f","Type":"ContainerStarted","Data":"e108dc06690ec1febee617c75745bb03c9499162530fc85509d85fd2830eddb8"} Dec 06 05:37:31 crc kubenswrapper[4706]: I1206 05:37:31.838292 4706 generic.go:334] "Generic (PLEG): container finished" podID="aa8f7b7c-ef9a-4856-b1ec-5623645d338f" containerID="da5d94a412ff3c32f2b0d89b0c5a2215c8c71cc362f7b08f15107b91de22cb0e" exitCode=0 Dec 06 05:37:31 crc kubenswrapper[4706]: I1206 05:37:31.838400 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6k6sm" event={"ID":"aa8f7b7c-ef9a-4856-b1ec-5623645d338f","Type":"ContainerDied","Data":"da5d94a412ff3c32f2b0d89b0c5a2215c8c71cc362f7b08f15107b91de22cb0e"} Dec 06 05:37:32 crc kubenswrapper[4706]: I1206 05:37:32.845223 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6k6sm" event={"ID":"aa8f7b7c-ef9a-4856-b1ec-5623645d338f","Type":"ContainerStarted","Data":"4b34708c6b06e1481a02717205c72e2fc4a8e1b45d8f89c9deefd57b40ee1693"} Dec 06 05:37:32 crc kubenswrapper[4706]: I1206 05:37:32.863230 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6k6sm" podStartSLOduration=2.421567842 podStartE2EDuration="4.863208111s" podCreationTimestamp="2025-12-06 05:37:28 +0000 UTC" firstStartedPulling="2025-12-06 05:37:29.829107055 +0000 UTC m=+1072.156930999" lastFinishedPulling="2025-12-06 05:37:32.270747314 +0000 UTC m=+1074.598571268" observedRunningTime="2025-12-06 05:37:32.862150853 +0000 UTC m=+1075.189974807" watchObservedRunningTime="2025-12-06 05:37:32.863208111 +0000 UTC m=+1075.191032065" Dec 06 05:37:35 crc kubenswrapper[4706]: I1206 05:37:35.961214 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:37:35 crc kubenswrapper[4706]: I1206 05:37:35.961554 4706 prober.go:107] "Probe 
failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:37:35 crc kubenswrapper[4706]: I1206 05:37:35.961600 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:37:35 crc kubenswrapper[4706]: I1206 05:37:35.962227 4706 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5837ae2ad3340b198002bcadcaff039fe17103dc504dd99a597185b1f1d89acf"} pod="openshift-machine-config-operator/machine-config-daemon-z27rn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 06 05:37:35 crc kubenswrapper[4706]: I1206 05:37:35.962281 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" containerID="cri-o://5837ae2ad3340b198002bcadcaff039fe17103dc504dd99a597185b1f1d89acf" gracePeriod=600 Dec 06 05:37:36 crc kubenswrapper[4706]: I1206 05:37:36.877805 4706 generic.go:334] "Generic (PLEG): container finished" podID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerID="5837ae2ad3340b198002bcadcaff039fe17103dc504dd99a597185b1f1d89acf" exitCode=0 Dec 06 05:37:36 crc kubenswrapper[4706]: I1206 05:37:36.878031 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerDied","Data":"5837ae2ad3340b198002bcadcaff039fe17103dc504dd99a597185b1f1d89acf"} Dec 06 05:37:36 crc kubenswrapper[4706]: I1206 05:37:36.878245 4706 scope.go:117] "RemoveContainer" containerID="d351fc246d2774fcded6a1058eb8824f36d694019b784e663bb46cc68f90094f" Dec 06 05:37:37 crc kubenswrapper[4706]: I1206 05:37:37.886496 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"7cb88f72dc580dec882828d525bf28a4003301f3e0567fd190938d53e4a87ab0"} Dec 06 05:37:38 crc kubenswrapper[4706]: I1206 05:37:38.694074 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6k6sm" Dec 06 05:37:38 crc kubenswrapper[4706]: I1206 05:37:38.694479 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6k6sm" Dec 06 05:37:38 crc kubenswrapper[4706]: I1206 05:37:38.753666 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6k6sm" Dec 06 05:37:38 crc kubenswrapper[4706]: I1206 05:37:38.927237 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6k6sm" Dec 06 05:37:38 crc kubenswrapper[4706]: I1206 05:37:38.985831 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6k6sm"] Dec 06 05:37:40 crc kubenswrapper[4706]: I1206 05:37:40.903338 4706 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-marketplace/certified-operators-6k6sm" podUID="aa8f7b7c-ef9a-4856-b1ec-5623645d338f" containerName="registry-server" containerID="cri-o://4b34708c6b06e1481a02717205c72e2fc4a8e1b45d8f89c9deefd57b40ee1693" gracePeriod=2 Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.320020 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6k6sm" Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.461475 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8m972\" (UniqueName: \"kubernetes.io/projected/aa8f7b7c-ef9a-4856-b1ec-5623645d338f-kube-api-access-8m972\") pod \"aa8f7b7c-ef9a-4856-b1ec-5623645d338f\" (UID: \"aa8f7b7c-ef9a-4856-b1ec-5623645d338f\") " Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.465184 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa8f7b7c-ef9a-4856-b1ec-5623645d338f-catalog-content\") pod \"aa8f7b7c-ef9a-4856-b1ec-5623645d338f\" (UID: \"aa8f7b7c-ef9a-4856-b1ec-5623645d338f\") " Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.465440 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa8f7b7c-ef9a-4856-b1ec-5623645d338f-utilities\") pod \"aa8f7b7c-ef9a-4856-b1ec-5623645d338f\" (UID: \"aa8f7b7c-ef9a-4856-b1ec-5623645d338f\") " Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.467164 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa8f7b7c-ef9a-4856-b1ec-5623645d338f-utilities" (OuterVolumeSpecName: "utilities") pod "aa8f7b7c-ef9a-4856-b1ec-5623645d338f" (UID: "aa8f7b7c-ef9a-4856-b1ec-5623645d338f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.468468 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aa8f7b7c-ef9a-4856-b1ec-5623645d338f-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.470647 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa8f7b7c-ef9a-4856-b1ec-5623645d338f-kube-api-access-8m972" (OuterVolumeSpecName: "kube-api-access-8m972") pod "aa8f7b7c-ef9a-4856-b1ec-5623645d338f" (UID: "aa8f7b7c-ef9a-4856-b1ec-5623645d338f"). InnerVolumeSpecName "kube-api-access-8m972". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.514598 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa8f7b7c-ef9a-4856-b1ec-5623645d338f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "aa8f7b7c-ef9a-4856-b1ec-5623645d338f" (UID: "aa8f7b7c-ef9a-4856-b1ec-5623645d338f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.569877 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8m972\" (UniqueName: \"kubernetes.io/projected/aa8f7b7c-ef9a-4856-b1ec-5623645d338f-kube-api-access-8m972\") on node \"crc\" DevicePath \"\"" Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.570152 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aa8f7b7c-ef9a-4856-b1ec-5623645d338f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.909658 4706 generic.go:334] "Generic (PLEG): container finished" podID="aa8f7b7c-ef9a-4856-b1ec-5623645d338f" containerID="4b34708c6b06e1481a02717205c72e2fc4a8e1b45d8f89c9deefd57b40ee1693" exitCode=0 Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.909705 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6k6sm" Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.909708 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6k6sm" event={"ID":"aa8f7b7c-ef9a-4856-b1ec-5623645d338f","Type":"ContainerDied","Data":"4b34708c6b06e1481a02717205c72e2fc4a8e1b45d8f89c9deefd57b40ee1693"} Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.909753 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6k6sm" event={"ID":"aa8f7b7c-ef9a-4856-b1ec-5623645d338f","Type":"ContainerDied","Data":"e108dc06690ec1febee617c75745bb03c9499162530fc85509d85fd2830eddb8"} Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.909774 4706 scope.go:117] "RemoveContainer" containerID="4b34708c6b06e1481a02717205c72e2fc4a8e1b45d8f89c9deefd57b40ee1693" Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.925290 4706 scope.go:117] "RemoveContainer" containerID="da5d94a412ff3c32f2b0d89b0c5a2215c8c71cc362f7b08f15107b91de22cb0e" Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.937361 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6k6sm"] Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.940867 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6k6sm"] Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.956822 4706 scope.go:117] "RemoveContainer" containerID="7172a68ce9777b8feb8cb588662ad9f4472b70c941e94b20acebbfe451e1a79c" Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.974021 4706 scope.go:117] "RemoveContainer" containerID="4b34708c6b06e1481a02717205c72e2fc4a8e1b45d8f89c9deefd57b40ee1693" Dec 06 05:37:41 crc kubenswrapper[4706]: E1206 05:37:41.974645 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4b34708c6b06e1481a02717205c72e2fc4a8e1b45d8f89c9deefd57b40ee1693\": container with ID starting with 4b34708c6b06e1481a02717205c72e2fc4a8e1b45d8f89c9deefd57b40ee1693 not found: ID does not exist" containerID="4b34708c6b06e1481a02717205c72e2fc4a8e1b45d8f89c9deefd57b40ee1693" Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.974678 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4b34708c6b06e1481a02717205c72e2fc4a8e1b45d8f89c9deefd57b40ee1693"} err="failed to get container status 
\"4b34708c6b06e1481a02717205c72e2fc4a8e1b45d8f89c9deefd57b40ee1693\": rpc error: code = NotFound desc = could not find container \"4b34708c6b06e1481a02717205c72e2fc4a8e1b45d8f89c9deefd57b40ee1693\": container with ID starting with 4b34708c6b06e1481a02717205c72e2fc4a8e1b45d8f89c9deefd57b40ee1693 not found: ID does not exist" Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.974699 4706 scope.go:117] "RemoveContainer" containerID="da5d94a412ff3c32f2b0d89b0c5a2215c8c71cc362f7b08f15107b91de22cb0e" Dec 06 05:37:41 crc kubenswrapper[4706]: E1206 05:37:41.975071 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da5d94a412ff3c32f2b0d89b0c5a2215c8c71cc362f7b08f15107b91de22cb0e\": container with ID starting with da5d94a412ff3c32f2b0d89b0c5a2215c8c71cc362f7b08f15107b91de22cb0e not found: ID does not exist" containerID="da5d94a412ff3c32f2b0d89b0c5a2215c8c71cc362f7b08f15107b91de22cb0e" Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.975093 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da5d94a412ff3c32f2b0d89b0c5a2215c8c71cc362f7b08f15107b91de22cb0e"} err="failed to get container status \"da5d94a412ff3c32f2b0d89b0c5a2215c8c71cc362f7b08f15107b91de22cb0e\": rpc error: code = NotFound desc = could not find container \"da5d94a412ff3c32f2b0d89b0c5a2215c8c71cc362f7b08f15107b91de22cb0e\": container with ID starting with da5d94a412ff3c32f2b0d89b0c5a2215c8c71cc362f7b08f15107b91de22cb0e not found: ID does not exist" Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.975106 4706 scope.go:117] "RemoveContainer" containerID="7172a68ce9777b8feb8cb588662ad9f4472b70c941e94b20acebbfe451e1a79c" Dec 06 05:37:41 crc kubenswrapper[4706]: E1206 05:37:41.975389 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7172a68ce9777b8feb8cb588662ad9f4472b70c941e94b20acebbfe451e1a79c\": container with ID starting with 7172a68ce9777b8feb8cb588662ad9f4472b70c941e94b20acebbfe451e1a79c not found: ID does not exist" containerID="7172a68ce9777b8feb8cb588662ad9f4472b70c941e94b20acebbfe451e1a79c" Dec 06 05:37:41 crc kubenswrapper[4706]: I1206 05:37:41.975443 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7172a68ce9777b8feb8cb588662ad9f4472b70c941e94b20acebbfe451e1a79c"} err="failed to get container status \"7172a68ce9777b8feb8cb588662ad9f4472b70c941e94b20acebbfe451e1a79c\": rpc error: code = NotFound desc = could not find container \"7172a68ce9777b8feb8cb588662ad9f4472b70c941e94b20acebbfe451e1a79c\": container with ID starting with 7172a68ce9777b8feb8cb588662ad9f4472b70c941e94b20acebbfe451e1a79c not found: ID does not exist" Dec 06 05:37:42 crc kubenswrapper[4706]: I1206 05:37:42.041570 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa8f7b7c-ef9a-4856-b1ec-5623645d338f" path="/var/lib/kubelet/pods/aa8f7b7c-ef9a-4856-b1ec-5623645d338f/volumes" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.670783 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-fbpqk"] Dec 06 05:38:21 crc kubenswrapper[4706]: E1206 05:38:21.671558 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa8f7b7c-ef9a-4856-b1ec-5623645d338f" containerName="extract-content" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.671575 4706 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="aa8f7b7c-ef9a-4856-b1ec-5623645d338f" containerName="extract-content" Dec 06 05:38:21 crc kubenswrapper[4706]: E1206 05:38:21.671584 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa8f7b7c-ef9a-4856-b1ec-5623645d338f" containerName="registry-server" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.671590 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa8f7b7c-ef9a-4856-b1ec-5623645d338f" containerName="registry-server" Dec 06 05:38:21 crc kubenswrapper[4706]: E1206 05:38:21.671605 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aa8f7b7c-ef9a-4856-b1ec-5623645d338f" containerName="extract-utilities" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.671612 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="aa8f7b7c-ef9a-4856-b1ec-5623645d338f" containerName="extract-utilities" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.671695 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="aa8f7b7c-ef9a-4856-b1ec-5623645d338f" containerName="registry-server" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.672166 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-fbpqk" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.675333 4706 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-lsvw7" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.675333 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.675434 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.684389 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-pf8gt"] Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.685115 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-pf8gt" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.687536 4706 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-2qwp7" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.688994 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-fbpqk"] Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.693022 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-pf8gt"] Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.708608 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-22w82"] Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.709459 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-22w82" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.711937 4706 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-nv6qf" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.729105 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-22w82"] Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.808791 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmrlw\" (UniqueName: \"kubernetes.io/projected/052717cc-1d2a-4e9a-a6a3-897c1d529b1e-kube-api-access-tmrlw\") pod \"cert-manager-webhook-5655c58dd6-22w82\" (UID: \"052717cc-1d2a-4e9a-a6a3-897c1d529b1e\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-22w82" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.808879 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cv62\" (UniqueName: \"kubernetes.io/projected/44e622ec-7780-489c-bcf0-575ec84dc213-kube-api-access-4cv62\") pod \"cert-manager-5b446d88c5-pf8gt\" (UID: \"44e622ec-7780-489c-bcf0-575ec84dc213\") " pod="cert-manager/cert-manager-5b446d88c5-pf8gt" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.808952 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njpjk\" (UniqueName: \"kubernetes.io/projected/58b45d75-86f1-4092-89ba-a1f924030512-kube-api-access-njpjk\") pod \"cert-manager-cainjector-7f985d654d-fbpqk\" (UID: \"58b45d75-86f1-4092-89ba-a1f924030512\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-fbpqk" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.910108 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmrlw\" (UniqueName: \"kubernetes.io/projected/052717cc-1d2a-4e9a-a6a3-897c1d529b1e-kube-api-access-tmrlw\") pod \"cert-manager-webhook-5655c58dd6-22w82\" (UID: \"052717cc-1d2a-4e9a-a6a3-897c1d529b1e\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-22w82" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.910216 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cv62\" (UniqueName: \"kubernetes.io/projected/44e622ec-7780-489c-bcf0-575ec84dc213-kube-api-access-4cv62\") pod \"cert-manager-5b446d88c5-pf8gt\" (UID: \"44e622ec-7780-489c-bcf0-575ec84dc213\") " pod="cert-manager/cert-manager-5b446d88c5-pf8gt" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.910250 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njpjk\" (UniqueName: \"kubernetes.io/projected/58b45d75-86f1-4092-89ba-a1f924030512-kube-api-access-njpjk\") pod \"cert-manager-cainjector-7f985d654d-fbpqk\" (UID: \"58b45d75-86f1-4092-89ba-a1f924030512\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-fbpqk" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.926846 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmrlw\" (UniqueName: \"kubernetes.io/projected/052717cc-1d2a-4e9a-a6a3-897c1d529b1e-kube-api-access-tmrlw\") pod \"cert-manager-webhook-5655c58dd6-22w82\" (UID: \"052717cc-1d2a-4e9a-a6a3-897c1d529b1e\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-22w82" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.933123 4706 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-4cv62\" (UniqueName: \"kubernetes.io/projected/44e622ec-7780-489c-bcf0-575ec84dc213-kube-api-access-4cv62\") pod \"cert-manager-5b446d88c5-pf8gt\" (UID: \"44e622ec-7780-489c-bcf0-575ec84dc213\") " pod="cert-manager/cert-manager-5b446d88c5-pf8gt" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.933829 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njpjk\" (UniqueName: \"kubernetes.io/projected/58b45d75-86f1-4092-89ba-a1f924030512-kube-api-access-njpjk\") pod \"cert-manager-cainjector-7f985d654d-fbpqk\" (UID: \"58b45d75-86f1-4092-89ba-a1f924030512\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-fbpqk" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.992487 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-fbpqk" Dec 06 05:38:21 crc kubenswrapper[4706]: I1206 05:38:21.999814 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-pf8gt" Dec 06 05:38:22 crc kubenswrapper[4706]: I1206 05:38:22.023087 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-22w82" Dec 06 05:38:22 crc kubenswrapper[4706]: I1206 05:38:22.229146 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-fbpqk"] Dec 06 05:38:22 crc kubenswrapper[4706]: I1206 05:38:22.265146 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-pf8gt"] Dec 06 05:38:22 crc kubenswrapper[4706]: W1206 05:38:22.271242 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod44e622ec_7780_489c_bcf0_575ec84dc213.slice/crio-6eba8c759b23bfa9b64f3c0288b9ba830ae34c96200ac841158d637e64452a19 WatchSource:0}: Error finding container 6eba8c759b23bfa9b64f3c0288b9ba830ae34c96200ac841158d637e64452a19: Status 404 returned error can't find the container with id 6eba8c759b23bfa9b64f3c0288b9ba830ae34c96200ac841158d637e64452a19 Dec 06 05:38:22 crc kubenswrapper[4706]: I1206 05:38:22.306811 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-22w82"] Dec 06 05:38:23 crc kubenswrapper[4706]: I1206 05:38:23.150067 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-22w82" event={"ID":"052717cc-1d2a-4e9a-a6a3-897c1d529b1e","Type":"ContainerStarted","Data":"1662a1614c3224c0f36f242986e92e43c55de92e7a1aec6a70d0168ce0ec45ed"} Dec 06 05:38:23 crc kubenswrapper[4706]: I1206 05:38:23.151456 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-fbpqk" event={"ID":"58b45d75-86f1-4092-89ba-a1f924030512","Type":"ContainerStarted","Data":"78c0c43cbf8f4aa4125715f373e4b98b05650ef0443d96a3357cd0c3879ccf48"} Dec 06 05:38:23 crc kubenswrapper[4706]: I1206 05:38:23.152365 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-pf8gt" event={"ID":"44e622ec-7780-489c-bcf0-575ec84dc213","Type":"ContainerStarted","Data":"6eba8c759b23bfa9b64f3c0288b9ba830ae34c96200ac841158d637e64452a19"} Dec 06 05:38:27 crc kubenswrapper[4706]: I1206 05:38:27.176370 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-pf8gt" 
event={"ID":"44e622ec-7780-489c-bcf0-575ec84dc213","Type":"ContainerStarted","Data":"f397d4b10b6c3e6fff4da8dd546aae9473fed251cb0e1d72a236847a2a9a0c51"} Dec 06 05:38:27 crc kubenswrapper[4706]: I1206 05:38:27.178234 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-22w82" event={"ID":"052717cc-1d2a-4e9a-a6a3-897c1d529b1e","Type":"ContainerStarted","Data":"796171acc75a07055250f630cddfdb7cc3bf9370e1017953882b86569d3faa76"} Dec 06 05:38:27 crc kubenswrapper[4706]: I1206 05:38:27.178637 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-22w82" Dec 06 05:38:27 crc kubenswrapper[4706]: I1206 05:38:27.179938 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-fbpqk" event={"ID":"58b45d75-86f1-4092-89ba-a1f924030512","Type":"ContainerStarted","Data":"b94ba0cd78c1e8a2bfecc75df305615ed0ab58ad085dca542cd62e37f6f237fe"} Dec 06 05:38:27 crc kubenswrapper[4706]: I1206 05:38:27.199028 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-pf8gt" podStartSLOduration=2.097726676 podStartE2EDuration="6.199008775s" podCreationTimestamp="2025-12-06 05:38:21 +0000 UTC" firstStartedPulling="2025-12-06 05:38:22.273484753 +0000 UTC m=+1124.601308697" lastFinishedPulling="2025-12-06 05:38:26.374766852 +0000 UTC m=+1128.702590796" observedRunningTime="2025-12-06 05:38:27.19373961 +0000 UTC m=+1129.521563554" watchObservedRunningTime="2025-12-06 05:38:27.199008775 +0000 UTC m=+1129.526832729" Dec 06 05:38:27 crc kubenswrapper[4706]: I1206 05:38:27.217609 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-fbpqk" podStartSLOduration=2.157454617 podStartE2EDuration="6.217592826s" podCreationTimestamp="2025-12-06 05:38:21 +0000 UTC" firstStartedPulling="2025-12-06 05:38:22.240603849 +0000 UTC m=+1124.568427793" lastFinishedPulling="2025-12-06 05:38:26.300742058 +0000 UTC m=+1128.628566002" observedRunningTime="2025-12-06 05:38:27.21450317 +0000 UTC m=+1129.542327154" watchObservedRunningTime="2025-12-06 05:38:27.217592826 +0000 UTC m=+1129.545416780" Dec 06 05:38:27 crc kubenswrapper[4706]: I1206 05:38:27.233980 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-22w82" podStartSLOduration=2.241531407 podStartE2EDuration="6.233958595s" podCreationTimestamp="2025-12-06 05:38:21 +0000 UTC" firstStartedPulling="2025-12-06 05:38:22.315827506 +0000 UTC m=+1124.643651450" lastFinishedPulling="2025-12-06 05:38:26.308254704 +0000 UTC m=+1128.636078638" observedRunningTime="2025-12-06 05:38:27.228449073 +0000 UTC m=+1129.556273047" watchObservedRunningTime="2025-12-06 05:38:27.233958595 +0000 UTC m=+1129.561782539" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.026837 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-22w82" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.462526 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-l5xg7"] Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.463180 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovn-controller" 
containerID="cri-o://c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842" gracePeriod=30 Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.463234 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7" gracePeriod=30 Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.463234 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="nbdb" containerID="cri-o://019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6" gracePeriod=30 Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.463340 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="kube-rbac-proxy-node" containerID="cri-o://a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab" gracePeriod=30 Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.463360 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="northd" containerID="cri-o://097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92" gracePeriod=30 Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.463405 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovn-acl-logging" containerID="cri-o://8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7" gracePeriod=30 Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.463467 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="sbdb" containerID="cri-o://b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5" gracePeriod=30 Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.490520 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovnkube-controller" containerID="cri-o://0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305" gracePeriod=30 Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.797196 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovnkube-controller/3.log" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.800339 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovn-acl-logging/0.log" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.800989 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovn-controller/0.log" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.801575 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848265 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-fxhmh"] Dec 06 05:38:32 crc kubenswrapper[4706]: E1206 05:38:32.848509 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovn-controller" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848525 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovn-controller" Dec 06 05:38:32 crc kubenswrapper[4706]: E1206 05:38:32.848541 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="kubecfg-setup" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848549 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="kubecfg-setup" Dec 06 05:38:32 crc kubenswrapper[4706]: E1206 05:38:32.848558 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovnkube-controller" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848566 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovnkube-controller" Dec 06 05:38:32 crc kubenswrapper[4706]: E1206 05:38:32.848574 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovnkube-controller" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848581 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovnkube-controller" Dec 06 05:38:32 crc kubenswrapper[4706]: E1206 05:38:32.848593 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="kube-rbac-proxy-node" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848600 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="kube-rbac-proxy-node" Dec 06 05:38:32 crc kubenswrapper[4706]: E1206 05:38:32.848611 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovn-acl-logging" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848618 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovn-acl-logging" Dec 06 05:38:32 crc kubenswrapper[4706]: E1206 05:38:32.848628 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="sbdb" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848634 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="sbdb" Dec 06 05:38:32 crc kubenswrapper[4706]: E1206 05:38:32.848646 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="kube-rbac-proxy-ovn-metrics" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848653 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="kube-rbac-proxy-ovn-metrics" Dec 06 05:38:32 crc kubenswrapper[4706]: E1206 05:38:32.848664 4706 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="northd" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848672 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="northd" Dec 06 05:38:32 crc kubenswrapper[4706]: E1206 05:38:32.848682 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovnkube-controller" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848691 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovnkube-controller" Dec 06 05:38:32 crc kubenswrapper[4706]: E1206 05:38:32.848707 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovnkube-controller" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848716 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovnkube-controller" Dec 06 05:38:32 crc kubenswrapper[4706]: E1206 05:38:32.848729 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="nbdb" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848736 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="nbdb" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848838 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="kube-rbac-proxy-node" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848850 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="northd" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848862 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovn-acl-logging" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848871 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovnkube-controller" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848880 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="nbdb" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848890 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovnkube-controller" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848901 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovnkube-controller" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848910 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="sbdb" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848921 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="kube-rbac-proxy-ovn-metrics" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848930 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovnkube-controller" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.848939 4706 
memory_manager.go:354] "RemoveStaleState removing state" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovn-controller" Dec 06 05:38:32 crc kubenswrapper[4706]: E1206 05:38:32.849067 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovnkube-controller" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.849078 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovnkube-controller" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.849183 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerName="ovnkube-controller" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.851113 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.886823 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-run-ovn\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.886860 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-var-lib-openvswitch\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.886878 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-cni-netd\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.886899 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-systemd-units\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.886921 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-run-netns\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.886934 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.886944 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-log-socket\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.886975 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-log-socket" (OuterVolumeSpecName: "log-socket") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.886977 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887001 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.886990 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-slash\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.886977 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887033 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887012 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-slash" (OuterVolumeSpecName: "host-slash") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887100 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-run-ovn-kubernetes\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887110 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887154 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-ovnkube-config\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887183 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-run-systemd\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887202 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-var-lib-cni-networks-ovn-kubernetes\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887224 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-env-overrides\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887244 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-node-log\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887284 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887291 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-ovn-node-metrics-cert\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887354 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-ovnkube-script-lib\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887381 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-kubelet\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887410 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-etc-openvswitch\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887435 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-run-openvswitch\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887460 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7xwsz\" (UniqueName: \"kubernetes.io/projected/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-kube-api-access-7xwsz\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887504 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-cni-bin\") pod \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\" (UID: \"a4bbd5a9-5b78-4e07-b4af-e10d4768de95\") " Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887643 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887675 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "etc-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887682 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887721 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887748 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887867 4706 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-kubelet\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887885 4706 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887898 4706 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-run-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887901 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887912 4706 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-run-ovn\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887925 4706 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887929 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "host-cni-bin". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887939 4706 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-cni-netd\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887950 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-node-log" (OuterVolumeSpecName: "node-log") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887953 4706 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-systemd-units\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887978 4706 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-run-netns\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887988 4706 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-log-socket\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.887997 4706 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-slash\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.888007 4706 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.888017 4706 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-ovnkube-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.888026 4706 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.888035 4706 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-env-overrides\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.892927 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.893285 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-kube-api-access-7xwsz" (OuterVolumeSpecName: "kube-api-access-7xwsz") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "kube-api-access-7xwsz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.902805 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "a4bbd5a9-5b78-4e07-b4af-e10d4768de95" (UID: "a4bbd5a9-5b78-4e07-b4af-e10d4768de95"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.989664 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7442d7da-2e4e-4e51-9b3e-e30be3a93849-env-overrides\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.989703 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-log-socket\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.989719 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-etc-openvswitch\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.989741 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-cni-bin\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.989771 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-run-ovn\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.989794 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrf8p\" (UniqueName: \"kubernetes.io/projected/7442d7da-2e4e-4e51-9b3e-e30be3a93849-kube-api-access-zrf8p\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.989816 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: 
\"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-systemd-units\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.989854 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-cni-netd\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.989883 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-var-lib-openvswitch\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.989902 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7442d7da-2e4e-4e51-9b3e-e30be3a93849-ovnkube-script-lib\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.989921 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7442d7da-2e4e-4e51-9b3e-e30be3a93849-ovn-node-metrics-cert\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.989978 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-run-netns\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.989993 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-run-systemd\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.990009 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-run-openvswitch\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.990021 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-run-ovn-kubernetes\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.990103 4706 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-kubelet\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.990171 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-slash\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.990191 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.990212 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-node-log\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.990276 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7442d7da-2e4e-4e51-9b3e-e30be3a93849-ovnkube-config\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.990362 4706 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-host-cni-bin\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.990376 4706 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-run-systemd\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.990385 4706 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-node-log\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.990395 4706 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.990405 4706 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:32 crc kubenswrapper[4706]: I1206 05:38:32.990414 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7xwsz\" (UniqueName: 
\"kubernetes.io/projected/a4bbd5a9-5b78-4e07-b4af-e10d4768de95-kube-api-access-7xwsz\") on node \"crc\" DevicePath \"\"" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.091693 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-run-ovn\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.091745 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrf8p\" (UniqueName: \"kubernetes.io/projected/7442d7da-2e4e-4e51-9b3e-e30be3a93849-kube-api-access-zrf8p\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.091766 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-run-ovn\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.091772 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-systemd-units\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.091801 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-systemd-units\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.091827 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-cni-netd\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.091851 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-var-lib-openvswitch\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.091867 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7442d7da-2e4e-4e51-9b3e-e30be3a93849-ovnkube-script-lib\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.091884 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7442d7da-2e4e-4e51-9b3e-e30be3a93849-ovn-node-metrics-cert\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.091908 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-run-netns\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.091926 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-run-ovn-kubernetes\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.091942 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-run-systemd\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.091956 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-run-openvswitch\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.091973 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-kubelet\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.091995 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-slash\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092012 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092027 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-node-log\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092085 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7442d7da-2e4e-4e51-9b3e-e30be3a93849-ovnkube-config\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 
05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092117 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-run-ovn-kubernetes\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092118 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7442d7da-2e4e-4e51-9b3e-e30be3a93849-env-overrides\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092183 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-run-openvswitch\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092197 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-slash\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092262 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-node-log\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092285 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092288 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-run-systemd\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092305 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-kubelet\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092342 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-run-netns\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092319 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-cni-netd\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092359 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-var-lib-openvswitch\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092410 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-log-socket\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092443 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-etc-openvswitch\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092474 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-cni-bin\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092477 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-etc-openvswitch\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092480 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-log-socket\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.092537 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/7442d7da-2e4e-4e51-9b3e-e30be3a93849-host-cni-bin\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.093080 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/7442d7da-2e4e-4e51-9b3e-e30be3a93849-ovnkube-script-lib\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.093090 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/7442d7da-2e4e-4e51-9b3e-e30be3a93849-ovnkube-config\") pod \"ovnkube-node-fxhmh\" 
(UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.093389 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/7442d7da-2e4e-4e51-9b3e-e30be3a93849-env-overrides\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.095301 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/7442d7da-2e4e-4e51-9b3e-e30be3a93849-ovn-node-metrics-cert\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.106903 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrf8p\" (UniqueName: \"kubernetes.io/projected/7442d7da-2e4e-4e51-9b3e-e30be3a93849-kube-api-access-zrf8p\") pod \"ovnkube-node-fxhmh\" (UID: \"7442d7da-2e4e-4e51-9b3e-e30be3a93849\") " pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.164190 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:33 crc kubenswrapper[4706]: W1206 05:38:33.183129 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7442d7da_2e4e_4e51_9b3e_e30be3a93849.slice/crio-60a8140def4e5e26241122023bd49cb3282a9ec387ab4b23fc1a8e0324ab4640 WatchSource:0}: Error finding container 60a8140def4e5e26241122023bd49cb3282a9ec387ab4b23fc1a8e0324ab4640: Status 404 returned error can't find the container with id 60a8140def4e5e26241122023bd49cb3282a9ec387ab4b23fc1a8e0324ab4640 Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.221370 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" event={"ID":"7442d7da-2e4e-4e51-9b3e-e30be3a93849","Type":"ContainerStarted","Data":"60a8140def4e5e26241122023bd49cb3282a9ec387ab4b23fc1a8e0324ab4640"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.224639 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovnkube-controller/3.log" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.229346 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovn-acl-logging/0.log" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.230224 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-l5xg7_a4bbd5a9-5b78-4e07-b4af-e10d4768de95/ovn-controller/0.log" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.230905 4706 generic.go:334] "Generic (PLEG): container finished" podID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerID="0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305" exitCode=0 Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.230995 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.231010 4706 generic.go:334] "Generic (PLEG): container finished" podID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerID="b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5" exitCode=0 Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.231034 4706 generic.go:334] "Generic (PLEG): container finished" podID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerID="019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6" exitCode=0 Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.231258 4706 generic.go:334] "Generic (PLEG): container finished" podID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerID="097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92" exitCode=0 Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.230962 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerDied","Data":"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.231401 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerDied","Data":"b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.231452 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerDied","Data":"019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.231511 4706 scope.go:117] "RemoveContainer" containerID="0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.231276 4706 generic.go:334] "Generic (PLEG): container finished" podID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerID="68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7" exitCode=0 Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.231515 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerDied","Data":"097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.231554 4706 generic.go:334] "Generic (PLEG): container finished" podID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerID="a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab" exitCode=0 Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.231796 4706 generic.go:334] "Generic (PLEG): container finished" podID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerID="8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7" exitCode=143 Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.231814 4706 generic.go:334] "Generic (PLEG): container finished" podID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" containerID="c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842" exitCode=143 Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.231804 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" 
event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerDied","Data":"68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.231877 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerDied","Data":"a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.231915 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.231948 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.231964 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.231979 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.231995 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232009 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232024 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232043 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232142 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232165 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerDied","Data":"8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232192 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232210 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2"} Dec 06 05:38:33 crc 
kubenswrapper[4706]: I1206 05:38:33.232225 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232240 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232253 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232267 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232282 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232295 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232309 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232322 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232347 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerDied","Data":"c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232368 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232384 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232395 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232405 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232415 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92"} Dec 06 05:38:33 crc 
kubenswrapper[4706]: I1206 05:38:33.232426 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232437 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232447 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232486 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232497 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232520 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l5xg7" event={"ID":"a4bbd5a9-5b78-4e07-b4af-e10d4768de95","Type":"ContainerDied","Data":"e88ce5555f9b3f6c0c60cb5a3aa1d1ce51ec2f33b9c49b87e84d51b52cdfd55f"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232540 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232555 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232566 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232579 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232591 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232602 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232612 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232623 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7"} Dec 06 05:38:33 crc 
kubenswrapper[4706]: I1206 05:38:33.232636 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.232646 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.237380 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rtxrp_f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5/kube-multus/2.log" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.238013 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rtxrp_f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5/kube-multus/1.log" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.238073 4706 generic.go:334] "Generic (PLEG): container finished" podID="f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5" containerID="797e4fef63c5c93dfa90e5d32f66cfdc8814ee8ea24d8c8a5751ba1b17fc9401" exitCode=2 Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.238115 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rtxrp" event={"ID":"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5","Type":"ContainerDied","Data":"797e4fef63c5c93dfa90e5d32f66cfdc8814ee8ea24d8c8a5751ba1b17fc9401"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.238146 4706 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3dedd7c8354756f4eba54307bbb72a153b9c4b8f01bbd97fce12423fd16f3aaf"} Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.238747 4706 scope.go:117] "RemoveContainer" containerID="797e4fef63c5c93dfa90e5d32f66cfdc8814ee8ea24d8c8a5751ba1b17fc9401" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.262409 4706 scope.go:117] "RemoveContainer" containerID="5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.286627 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-l5xg7"] Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.297236 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-l5xg7"] Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.306893 4706 scope.go:117] "RemoveContainer" containerID="b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.396504 4706 scope.go:117] "RemoveContainer" containerID="019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.429262 4706 scope.go:117] "RemoveContainer" containerID="097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.443863 4706 scope.go:117] "RemoveContainer" containerID="68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.456117 4706 scope.go:117] "RemoveContainer" containerID="a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.468300 4706 scope.go:117] "RemoveContainer" containerID="8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.483063 4706 
scope.go:117] "RemoveContainer" containerID="c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.505430 4706 scope.go:117] "RemoveContainer" containerID="28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.519203 4706 scope.go:117] "RemoveContainer" containerID="0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305" Dec 06 05:38:33 crc kubenswrapper[4706]: E1206 05:38:33.519662 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305\": container with ID starting with 0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305 not found: ID does not exist" containerID="0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.519715 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305"} err="failed to get container status \"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305\": rpc error: code = NotFound desc = could not find container \"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305\": container with ID starting with 0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.519749 4706 scope.go:117] "RemoveContainer" containerID="5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2" Dec 06 05:38:33 crc kubenswrapper[4706]: E1206 05:38:33.520099 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2\": container with ID starting with 5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2 not found: ID does not exist" containerID="5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.520146 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2"} err="failed to get container status \"5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2\": rpc error: code = NotFound desc = could not find container \"5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2\": container with ID starting with 5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.520181 4706 scope.go:117] "RemoveContainer" containerID="b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5" Dec 06 05:38:33 crc kubenswrapper[4706]: E1206 05:38:33.520852 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\": container with ID starting with b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5 not found: ID does not exist" containerID="b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.520881 4706 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5"} err="failed to get container status \"b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\": rpc error: code = NotFound desc = could not find container \"b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\": container with ID starting with b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.520895 4706 scope.go:117] "RemoveContainer" containerID="019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6" Dec 06 05:38:33 crc kubenswrapper[4706]: E1206 05:38:33.521163 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\": container with ID starting with 019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6 not found: ID does not exist" containerID="019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.521192 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6"} err="failed to get container status \"019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\": rpc error: code = NotFound desc = could not find container \"019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\": container with ID starting with 019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.521207 4706 scope.go:117] "RemoveContainer" containerID="097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92" Dec 06 05:38:33 crc kubenswrapper[4706]: E1206 05:38:33.521510 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\": container with ID starting with 097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92 not found: ID does not exist" containerID="097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.521551 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92"} err="failed to get container status \"097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\": rpc error: code = NotFound desc = could not find container \"097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\": container with ID starting with 097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.521578 4706 scope.go:117] "RemoveContainer" containerID="68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7" Dec 06 05:38:33 crc kubenswrapper[4706]: E1206 05:38:33.521961 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\": container with ID starting with 68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7 not found: ID does not exist" 
containerID="68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.521987 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7"} err="failed to get container status \"68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\": rpc error: code = NotFound desc = could not find container \"68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\": container with ID starting with 68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.522006 4706 scope.go:117] "RemoveContainer" containerID="a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab" Dec 06 05:38:33 crc kubenswrapper[4706]: E1206 05:38:33.522290 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\": container with ID starting with a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab not found: ID does not exist" containerID="a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.522316 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab"} err="failed to get container status \"a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\": rpc error: code = NotFound desc = could not find container \"a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\": container with ID starting with a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.522334 4706 scope.go:117] "RemoveContainer" containerID="8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7" Dec 06 05:38:33 crc kubenswrapper[4706]: E1206 05:38:33.522590 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\": container with ID starting with 8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7 not found: ID does not exist" containerID="8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.522614 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7"} err="failed to get container status \"8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\": rpc error: code = NotFound desc = could not find container \"8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\": container with ID starting with 8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.522628 4706 scope.go:117] "RemoveContainer" containerID="c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842" Dec 06 05:38:33 crc kubenswrapper[4706]: E1206 05:38:33.522893 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\": container with ID starting with c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842 not found: ID does not exist" containerID="c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.522922 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842"} err="failed to get container status \"c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\": rpc error: code = NotFound desc = could not find container \"c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\": container with ID starting with c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.522937 4706 scope.go:117] "RemoveContainer" containerID="28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b" Dec 06 05:38:33 crc kubenswrapper[4706]: E1206 05:38:33.523273 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\": container with ID starting with 28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b not found: ID does not exist" containerID="28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.523294 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b"} err="failed to get container status \"28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\": rpc error: code = NotFound desc = could not find container \"28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\": container with ID starting with 28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.523308 4706 scope.go:117] "RemoveContainer" containerID="0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.523539 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305"} err="failed to get container status \"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305\": rpc error: code = NotFound desc = could not find container \"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305\": container with ID starting with 0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.523564 4706 scope.go:117] "RemoveContainer" containerID="5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.523947 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2"} err="failed to get container status \"5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2\": rpc error: code = NotFound desc = could not find container \"5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2\": container with ID starting with 
5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.523993 4706 scope.go:117] "RemoveContainer" containerID="b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.524440 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5"} err="failed to get container status \"b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\": rpc error: code = NotFound desc = could not find container \"b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\": container with ID starting with b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.524462 4706 scope.go:117] "RemoveContainer" containerID="019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.524676 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6"} err="failed to get container status \"019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\": rpc error: code = NotFound desc = could not find container \"019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\": container with ID starting with 019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.524697 4706 scope.go:117] "RemoveContainer" containerID="097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.525083 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92"} err="failed to get container status \"097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\": rpc error: code = NotFound desc = could not find container \"097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\": container with ID starting with 097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.525108 4706 scope.go:117] "RemoveContainer" containerID="68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.525398 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7"} err="failed to get container status \"68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\": rpc error: code = NotFound desc = could not find container \"68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\": container with ID starting with 68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.525425 4706 scope.go:117] "RemoveContainer" containerID="a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.525760 4706 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab"} err="failed to get container status \"a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\": rpc error: code = NotFound desc = could not find container \"a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\": container with ID starting with a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.525781 4706 scope.go:117] "RemoveContainer" containerID="8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.526066 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7"} err="failed to get container status \"8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\": rpc error: code = NotFound desc = could not find container \"8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\": container with ID starting with 8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.526088 4706 scope.go:117] "RemoveContainer" containerID="c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.526484 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842"} err="failed to get container status \"c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\": rpc error: code = NotFound desc = could not find container \"c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\": container with ID starting with c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.526517 4706 scope.go:117] "RemoveContainer" containerID="28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.526793 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b"} err="failed to get container status \"28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\": rpc error: code = NotFound desc = could not find container \"28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\": container with ID starting with 28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.526822 4706 scope.go:117] "RemoveContainer" containerID="0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.527252 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305"} err="failed to get container status \"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305\": rpc error: code = NotFound desc = could not find container \"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305\": container with ID starting with 0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305 not found: ID does not exist" Dec 
06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.527280 4706 scope.go:117] "RemoveContainer" containerID="5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.527515 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2"} err="failed to get container status \"5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2\": rpc error: code = NotFound desc = could not find container \"5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2\": container with ID starting with 5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.527551 4706 scope.go:117] "RemoveContainer" containerID="b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.527827 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5"} err="failed to get container status \"b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\": rpc error: code = NotFound desc = could not find container \"b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\": container with ID starting with b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.527851 4706 scope.go:117] "RemoveContainer" containerID="019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.528324 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6"} err="failed to get container status \"019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\": rpc error: code = NotFound desc = could not find container \"019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\": container with ID starting with 019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.528346 4706 scope.go:117] "RemoveContainer" containerID="097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.528637 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92"} err="failed to get container status \"097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\": rpc error: code = NotFound desc = could not find container \"097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\": container with ID starting with 097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.528668 4706 scope.go:117] "RemoveContainer" containerID="68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.529006 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7"} err="failed to get container status 
\"68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\": rpc error: code = NotFound desc = could not find container \"68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\": container with ID starting with 68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.529039 4706 scope.go:117] "RemoveContainer" containerID="a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.529292 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab"} err="failed to get container status \"a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\": rpc error: code = NotFound desc = could not find container \"a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\": container with ID starting with a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.529316 4706 scope.go:117] "RemoveContainer" containerID="8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.529604 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7"} err="failed to get container status \"8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\": rpc error: code = NotFound desc = could not find container \"8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\": container with ID starting with 8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.529623 4706 scope.go:117] "RemoveContainer" containerID="c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.529824 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842"} err="failed to get container status \"c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\": rpc error: code = NotFound desc = could not find container \"c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\": container with ID starting with c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.529843 4706 scope.go:117] "RemoveContainer" containerID="28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.530059 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b"} err="failed to get container status \"28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\": rpc error: code = NotFound desc = could not find container \"28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\": container with ID starting with 28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.530090 4706 scope.go:117] "RemoveContainer" 
containerID="0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.530373 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305"} err="failed to get container status \"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305\": rpc error: code = NotFound desc = could not find container \"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305\": container with ID starting with 0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.530393 4706 scope.go:117] "RemoveContainer" containerID="5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.530679 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2"} err="failed to get container status \"5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2\": rpc error: code = NotFound desc = could not find container \"5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2\": container with ID starting with 5a62ba0d11c23b597750c91fbea302463599234d65b6f8ee6363bef52c4f52b2 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.530709 4706 scope.go:117] "RemoveContainer" containerID="b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.530988 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5"} err="failed to get container status \"b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\": rpc error: code = NotFound desc = could not find container \"b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5\": container with ID starting with b2920a0c0df9340a255d3ed356e40e72bfbefb2a92ba4cc3408340b8e8563de5 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.531014 4706 scope.go:117] "RemoveContainer" containerID="019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.531355 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6"} err="failed to get container status \"019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\": rpc error: code = NotFound desc = could not find container \"019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6\": container with ID starting with 019ac548f50d70ff8a6b4c020df528ca337e0e62306d77eb24c0bc46e98ef4d6 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.531376 4706 scope.go:117] "RemoveContainer" containerID="097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.531605 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92"} err="failed to get container status \"097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\": rpc error: code = NotFound desc = could not find 
container \"097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92\": container with ID starting with 097ad2ffb97e2b5536a41506143481b9e401508e5ddde73f9c2e0890afc67a92 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.531632 4706 scope.go:117] "RemoveContainer" containerID="68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.531861 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7"} err="failed to get container status \"68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\": rpc error: code = NotFound desc = could not find container \"68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7\": container with ID starting with 68dfb5c18bcff8697e9e2791b1ebb549e8070650b4f2873eda89beded1c04ed7 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.531882 4706 scope.go:117] "RemoveContainer" containerID="a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.532233 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab"} err="failed to get container status \"a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\": rpc error: code = NotFound desc = could not find container \"a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab\": container with ID starting with a58c3178f1903566203b5bf3109b21d7d2c1d71e0d9c89a11190518b7f4feaab not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.532253 4706 scope.go:117] "RemoveContainer" containerID="8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.532471 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7"} err="failed to get container status \"8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\": rpc error: code = NotFound desc = could not find container \"8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7\": container with ID starting with 8e50b8dc9ea96d018c9af4fc390bdb713bd06660bbbe5c390d503fbc7b3ac1b7 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.532491 4706 scope.go:117] "RemoveContainer" containerID="c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.532739 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842"} err="failed to get container status \"c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\": rpc error: code = NotFound desc = could not find container \"c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842\": container with ID starting with c48c1bde258387c195c62713ca03c4e5134f2d48bbecec1ad4b00827c93db842 not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.532773 4706 scope.go:117] "RemoveContainer" containerID="28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.533029 4706 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b"} err="failed to get container status \"28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\": rpc error: code = NotFound desc = could not find container \"28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b\": container with ID starting with 28214f53fdd5a687885c0d07225a8bf927514606a42d580fd07dd6e2a139b34b not found: ID does not exist" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.533082 4706 scope.go:117] "RemoveContainer" containerID="0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305" Dec 06 05:38:33 crc kubenswrapper[4706]: I1206 05:38:33.533372 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305"} err="failed to get container status \"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305\": rpc error: code = NotFound desc = could not find container \"0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305\": container with ID starting with 0d1ddc4380b397437aef8a3c7bc03fbccdf3aff7ae1203a53f8172a418bf4305 not found: ID does not exist" Dec 06 05:38:34 crc kubenswrapper[4706]: I1206 05:38:34.043450 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4bbd5a9-5b78-4e07-b4af-e10d4768de95" path="/var/lib/kubelet/pods/a4bbd5a9-5b78-4e07-b4af-e10d4768de95/volumes" Dec 06 05:38:34 crc kubenswrapper[4706]: I1206 05:38:34.247388 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rtxrp_f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5/kube-multus/2.log" Dec 06 05:38:34 crc kubenswrapper[4706]: I1206 05:38:34.248229 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rtxrp_f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5/kube-multus/1.log" Dec 06 05:38:34 crc kubenswrapper[4706]: I1206 05:38:34.248404 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-rtxrp" event={"ID":"f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5","Type":"ContainerStarted","Data":"6315615ea4095982354f5d8567871583a14f7e900e3e3406d161563e982ea95f"} Dec 06 05:38:34 crc kubenswrapper[4706]: I1206 05:38:34.249756 4706 generic.go:334] "Generic (PLEG): container finished" podID="7442d7da-2e4e-4e51-9b3e-e30be3a93849" containerID="770d3886d56cc6e0d8508a4d46afc24d632caf29406cb08659f1e96ae5c2fbee" exitCode=0 Dec 06 05:38:34 crc kubenswrapper[4706]: I1206 05:38:34.249803 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" event={"ID":"7442d7da-2e4e-4e51-9b3e-e30be3a93849","Type":"ContainerDied","Data":"770d3886d56cc6e0d8508a4d46afc24d632caf29406cb08659f1e96ae5c2fbee"} Dec 06 05:38:35 crc kubenswrapper[4706]: I1206 05:38:35.262969 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" event={"ID":"7442d7da-2e4e-4e51-9b3e-e30be3a93849","Type":"ContainerStarted","Data":"15a9422292818ad0c3c11625906fdd6d017a3a328dc7427f621562afa6b459fd"} Dec 06 05:38:35 crc kubenswrapper[4706]: I1206 05:38:35.263631 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" event={"ID":"7442d7da-2e4e-4e51-9b3e-e30be3a93849","Type":"ContainerStarted","Data":"074817e01da70d3f5332cb15866a4b3d0341a3196f9999831bc841005f9943e8"} Dec 06 05:38:35 crc kubenswrapper[4706]: I1206 05:38:35.263647 
4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" event={"ID":"7442d7da-2e4e-4e51-9b3e-e30be3a93849","Type":"ContainerStarted","Data":"2b812db86061423e78dbc86bcbb0473fad648892255923f4f282125473e97837"} Dec 06 05:38:35 crc kubenswrapper[4706]: I1206 05:38:35.263657 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" event={"ID":"7442d7da-2e4e-4e51-9b3e-e30be3a93849","Type":"ContainerStarted","Data":"6435436f009387b3dc5f7c0282e33731a398bdab2894fe373e98181f7feda2aa"} Dec 06 05:38:35 crc kubenswrapper[4706]: I1206 05:38:35.263670 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" event={"ID":"7442d7da-2e4e-4e51-9b3e-e30be3a93849","Type":"ContainerStarted","Data":"7105671728db9581f79893aad8611452e6bdbb31ae2a79c032091481ac668c6d"} Dec 06 05:38:35 crc kubenswrapper[4706]: I1206 05:38:35.263679 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" event={"ID":"7442d7da-2e4e-4e51-9b3e-e30be3a93849","Type":"ContainerStarted","Data":"43fe6858160636d72f486d79c43b4589de3bf104fc442b392fc031f658035a67"} Dec 06 05:38:38 crc kubenswrapper[4706]: I1206 05:38:38.282480 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" event={"ID":"7442d7da-2e4e-4e51-9b3e-e30be3a93849","Type":"ContainerStarted","Data":"e9b387aa13c9d33b7f8e409a778f926eb307927053fb49e4a03d30ebe25abb44"} Dec 06 05:38:39 crc kubenswrapper[4706]: I1206 05:38:39.037378 4706 scope.go:117] "RemoveContainer" containerID="3dedd7c8354756f4eba54307bbb72a153b9c4b8f01bbd97fce12423fd16f3aaf" Dec 06 05:38:39 crc kubenswrapper[4706]: I1206 05:38:39.289336 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-rtxrp_f4a3b88d-ed57-4b99-89d1-9a3d1ea8a0a5/kube-multus/2.log" Dec 06 05:38:40 crc kubenswrapper[4706]: I1206 05:38:40.298284 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" event={"ID":"7442d7da-2e4e-4e51-9b3e-e30be3a93849","Type":"ContainerStarted","Data":"d8eb76946cbb062c29f95f602eaf68dfe23b99ead0490341e2b9a10af2f36056"} Dec 06 05:38:42 crc kubenswrapper[4706]: I1206 05:38:42.307010 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:42 crc kubenswrapper[4706]: I1206 05:38:42.307072 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:42 crc kubenswrapper[4706]: I1206 05:38:42.307083 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:42 crc kubenswrapper[4706]: I1206 05:38:42.330115 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:42 crc kubenswrapper[4706]: I1206 05:38:42.330453 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:38:42 crc kubenswrapper[4706]: I1206 05:38:42.332731 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" podStartSLOduration=10.332721222 podStartE2EDuration="10.332721222s" podCreationTimestamp="2025-12-06 05:38:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:38:42.331808407 +0000 UTC m=+1144.659632361" watchObservedRunningTime="2025-12-06 05:38:42.332721222 +0000 UTC m=+1144.660545166" Dec 06 05:39:03 crc kubenswrapper[4706]: I1206 05:39:03.193610 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-fxhmh" Dec 06 05:39:12 crc kubenswrapper[4706]: I1206 05:39:12.386302 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm"] Dec 06 05:39:12 crc kubenswrapper[4706]: I1206 05:39:12.387871 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm" Dec 06 05:39:12 crc kubenswrapper[4706]: I1206 05:39:12.389744 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 06 05:39:12 crc kubenswrapper[4706]: I1206 05:39:12.402156 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm"] Dec 06 05:39:12 crc kubenswrapper[4706]: I1206 05:39:12.434454 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttx9l\" (UniqueName: \"kubernetes.io/projected/04c31578-f89b-4b78-86fb-7809b9fa2a21-kube-api-access-ttx9l\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm\" (UID: \"04c31578-f89b-4b78-86fb-7809b9fa2a21\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm" Dec 06 05:39:12 crc kubenswrapper[4706]: I1206 05:39:12.434525 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04c31578-f89b-4b78-86fb-7809b9fa2a21-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm\" (UID: \"04c31578-f89b-4b78-86fb-7809b9fa2a21\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm" Dec 06 05:39:12 crc kubenswrapper[4706]: I1206 05:39:12.434554 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04c31578-f89b-4b78-86fb-7809b9fa2a21-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm\" (UID: \"04c31578-f89b-4b78-86fb-7809b9fa2a21\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm" Dec 06 05:39:12 crc kubenswrapper[4706]: I1206 05:39:12.535141 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttx9l\" (UniqueName: \"kubernetes.io/projected/04c31578-f89b-4b78-86fb-7809b9fa2a21-kube-api-access-ttx9l\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm\" (UID: \"04c31578-f89b-4b78-86fb-7809b9fa2a21\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm" Dec 06 05:39:12 crc kubenswrapper[4706]: I1206 05:39:12.535211 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04c31578-f89b-4b78-86fb-7809b9fa2a21-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm\" (UID: \"04c31578-f89b-4b78-86fb-7809b9fa2a21\") " 
pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm" Dec 06 05:39:12 crc kubenswrapper[4706]: I1206 05:39:12.535236 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04c31578-f89b-4b78-86fb-7809b9fa2a21-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm\" (UID: \"04c31578-f89b-4b78-86fb-7809b9fa2a21\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm" Dec 06 05:39:12 crc kubenswrapper[4706]: I1206 05:39:12.535643 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04c31578-f89b-4b78-86fb-7809b9fa2a21-util\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm\" (UID: \"04c31578-f89b-4b78-86fb-7809b9fa2a21\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm" Dec 06 05:39:12 crc kubenswrapper[4706]: I1206 05:39:12.536136 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04c31578-f89b-4b78-86fb-7809b9fa2a21-bundle\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm\" (UID: \"04c31578-f89b-4b78-86fb-7809b9fa2a21\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm" Dec 06 05:39:12 crc kubenswrapper[4706]: I1206 05:39:12.560787 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttx9l\" (UniqueName: \"kubernetes.io/projected/04c31578-f89b-4b78-86fb-7809b9fa2a21-kube-api-access-ttx9l\") pod \"5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm\" (UID: \"04c31578-f89b-4b78-86fb-7809b9fa2a21\") " pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm" Dec 06 05:39:12 crc kubenswrapper[4706]: I1206 05:39:12.712638 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm" Dec 06 05:39:13 crc kubenswrapper[4706]: I1206 05:39:13.097018 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm"] Dec 06 05:39:13 crc kubenswrapper[4706]: I1206 05:39:13.501479 4706 generic.go:334] "Generic (PLEG): container finished" podID="04c31578-f89b-4b78-86fb-7809b9fa2a21" containerID="fe4cd26ad5ebb8298b15a44a8d2b1fa3843445e93770a1ee824939aa549fac03" exitCode=0 Dec 06 05:39:13 crc kubenswrapper[4706]: I1206 05:39:13.501537 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm" event={"ID":"04c31578-f89b-4b78-86fb-7809b9fa2a21","Type":"ContainerDied","Data":"fe4cd26ad5ebb8298b15a44a8d2b1fa3843445e93770a1ee824939aa549fac03"} Dec 06 05:39:13 crc kubenswrapper[4706]: I1206 05:39:13.501812 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm" event={"ID":"04c31578-f89b-4b78-86fb-7809b9fa2a21","Type":"ContainerStarted","Data":"08b15264c369b760ec23e2b6429531ae5ea368952b237d778f961a70846af966"} Dec 06 05:39:13 crc kubenswrapper[4706]: I1206 05:39:13.503174 4706 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 06 05:39:15 crc kubenswrapper[4706]: I1206 05:39:15.517068 4706 generic.go:334] "Generic (PLEG): container finished" podID="04c31578-f89b-4b78-86fb-7809b9fa2a21" containerID="7ef777fe2524fa069e3b6aeefb8748b1cf7bb57d3e4807a86a360dd845db71b7" exitCode=0 Dec 06 05:39:15 crc kubenswrapper[4706]: I1206 05:39:15.517177 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm" event={"ID":"04c31578-f89b-4b78-86fb-7809b9fa2a21","Type":"ContainerDied","Data":"7ef777fe2524fa069e3b6aeefb8748b1cf7bb57d3e4807a86a360dd845db71b7"} Dec 06 05:39:16 crc kubenswrapper[4706]: I1206 05:39:16.527529 4706 generic.go:334] "Generic (PLEG): container finished" podID="04c31578-f89b-4b78-86fb-7809b9fa2a21" containerID="b6a7e5e1fd5ea48af6cc518f04dcf7660a4666f73c20874267fa52c9b4de9001" exitCode=0 Dec 06 05:39:16 crc kubenswrapper[4706]: I1206 05:39:16.527599 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm" event={"ID":"04c31578-f89b-4b78-86fb-7809b9fa2a21","Type":"ContainerDied","Data":"b6a7e5e1fd5ea48af6cc518f04dcf7660a4666f73c20874267fa52c9b4de9001"} Dec 06 05:39:17 crc kubenswrapper[4706]: I1206 05:39:17.798827 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm" Dec 06 05:39:17 crc kubenswrapper[4706]: I1206 05:39:17.902421 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04c31578-f89b-4b78-86fb-7809b9fa2a21-bundle\") pod \"04c31578-f89b-4b78-86fb-7809b9fa2a21\" (UID: \"04c31578-f89b-4b78-86fb-7809b9fa2a21\") " Dec 06 05:39:17 crc kubenswrapper[4706]: I1206 05:39:17.902475 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04c31578-f89b-4b78-86fb-7809b9fa2a21-util\") pod \"04c31578-f89b-4b78-86fb-7809b9fa2a21\" (UID: \"04c31578-f89b-4b78-86fb-7809b9fa2a21\") " Dec 06 05:39:17 crc kubenswrapper[4706]: I1206 05:39:17.902527 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ttx9l\" (UniqueName: \"kubernetes.io/projected/04c31578-f89b-4b78-86fb-7809b9fa2a21-kube-api-access-ttx9l\") pod \"04c31578-f89b-4b78-86fb-7809b9fa2a21\" (UID: \"04c31578-f89b-4b78-86fb-7809b9fa2a21\") " Dec 06 05:39:17 crc kubenswrapper[4706]: I1206 05:39:17.903033 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04c31578-f89b-4b78-86fb-7809b9fa2a21-bundle" (OuterVolumeSpecName: "bundle") pod "04c31578-f89b-4b78-86fb-7809b9fa2a21" (UID: "04c31578-f89b-4b78-86fb-7809b9fa2a21"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:39:17 crc kubenswrapper[4706]: I1206 05:39:17.907588 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04c31578-f89b-4b78-86fb-7809b9fa2a21-kube-api-access-ttx9l" (OuterVolumeSpecName: "kube-api-access-ttx9l") pod "04c31578-f89b-4b78-86fb-7809b9fa2a21" (UID: "04c31578-f89b-4b78-86fb-7809b9fa2a21"). InnerVolumeSpecName "kube-api-access-ttx9l". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:39:17 crc kubenswrapper[4706]: I1206 05:39:17.915372 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04c31578-f89b-4b78-86fb-7809b9fa2a21-util" (OuterVolumeSpecName: "util") pod "04c31578-f89b-4b78-86fb-7809b9fa2a21" (UID: "04c31578-f89b-4b78-86fb-7809b9fa2a21"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:39:18 crc kubenswrapper[4706]: I1206 05:39:18.003296 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ttx9l\" (UniqueName: \"kubernetes.io/projected/04c31578-f89b-4b78-86fb-7809b9fa2a21-kube-api-access-ttx9l\") on node \"crc\" DevicePath \"\"" Dec 06 05:39:18 crc kubenswrapper[4706]: I1206 05:39:18.003324 4706 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04c31578-f89b-4b78-86fb-7809b9fa2a21-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:39:18 crc kubenswrapper[4706]: I1206 05:39:18.003334 4706 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04c31578-f89b-4b78-86fb-7809b9fa2a21-util\") on node \"crc\" DevicePath \"\"" Dec 06 05:39:18 crc kubenswrapper[4706]: I1206 05:39:18.540501 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm" event={"ID":"04c31578-f89b-4b78-86fb-7809b9fa2a21","Type":"ContainerDied","Data":"08b15264c369b760ec23e2b6429531ae5ea368952b237d778f961a70846af966"} Dec 06 05:39:18 crc kubenswrapper[4706]: I1206 05:39:18.540846 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="08b15264c369b760ec23e2b6429531ae5ea368952b237d778f961a70846af966" Dec 06 05:39:18 crc kubenswrapper[4706]: I1206 05:39:18.540594 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm" Dec 06 05:39:20 crc kubenswrapper[4706]: I1206 05:39:20.229988 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-xpljb"] Dec 06 05:39:20 crc kubenswrapper[4706]: E1206 05:39:20.230223 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04c31578-f89b-4b78-86fb-7809b9fa2a21" containerName="util" Dec 06 05:39:20 crc kubenswrapper[4706]: I1206 05:39:20.230235 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="04c31578-f89b-4b78-86fb-7809b9fa2a21" containerName="util" Dec 06 05:39:20 crc kubenswrapper[4706]: E1206 05:39:20.230243 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04c31578-f89b-4b78-86fb-7809b9fa2a21" containerName="extract" Dec 06 05:39:20 crc kubenswrapper[4706]: I1206 05:39:20.230249 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="04c31578-f89b-4b78-86fb-7809b9fa2a21" containerName="extract" Dec 06 05:39:20 crc kubenswrapper[4706]: E1206 05:39:20.230260 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04c31578-f89b-4b78-86fb-7809b9fa2a21" containerName="pull" Dec 06 05:39:20 crc kubenswrapper[4706]: I1206 05:39:20.230266 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="04c31578-f89b-4b78-86fb-7809b9fa2a21" containerName="pull" Dec 06 05:39:20 crc kubenswrapper[4706]: I1206 05:39:20.230356 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="04c31578-f89b-4b78-86fb-7809b9fa2a21" containerName="extract" Dec 06 05:39:20 crc kubenswrapper[4706]: I1206 05:39:20.230741 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-xpljb" Dec 06 05:39:20 crc kubenswrapper[4706]: I1206 05:39:20.232389 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-ngbmc" Dec 06 05:39:20 crc kubenswrapper[4706]: I1206 05:39:20.232450 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Dec 06 05:39:20 crc kubenswrapper[4706]: I1206 05:39:20.232732 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Dec 06 05:39:20 crc kubenswrapper[4706]: I1206 05:39:20.240295 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-xpljb"] Dec 06 05:39:20 crc kubenswrapper[4706]: I1206 05:39:20.330409 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l65tc\" (UniqueName: \"kubernetes.io/projected/77c327d8-4531-43a9-991e-f913f7e1d02e-kube-api-access-l65tc\") pod \"nmstate-operator-5b5b58f5c8-xpljb\" (UID: \"77c327d8-4531-43a9-991e-f913f7e1d02e\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-xpljb" Dec 06 05:39:20 crc kubenswrapper[4706]: I1206 05:39:20.432163 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l65tc\" (UniqueName: \"kubernetes.io/projected/77c327d8-4531-43a9-991e-f913f7e1d02e-kube-api-access-l65tc\") pod \"nmstate-operator-5b5b58f5c8-xpljb\" (UID: \"77c327d8-4531-43a9-991e-f913f7e1d02e\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-xpljb" Dec 06 05:39:20 crc kubenswrapper[4706]: I1206 05:39:20.447546 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l65tc\" (UniqueName: \"kubernetes.io/projected/77c327d8-4531-43a9-991e-f913f7e1d02e-kube-api-access-l65tc\") pod \"nmstate-operator-5b5b58f5c8-xpljb\" (UID: \"77c327d8-4531-43a9-991e-f913f7e1d02e\") " pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-xpljb" Dec 06 05:39:20 crc kubenswrapper[4706]: I1206 05:39:20.545005 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-xpljb" Dec 06 05:39:20 crc kubenswrapper[4706]: I1206 05:39:20.988513 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-5b5b58f5c8-xpljb"] Dec 06 05:39:21 crc kubenswrapper[4706]: I1206 05:39:21.556720 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-xpljb" event={"ID":"77c327d8-4531-43a9-991e-f913f7e1d02e","Type":"ContainerStarted","Data":"7e21330bf37122f4ac0f86cf11025c69b1a971c1f5d9041dd4f5b8f6ae7255f2"} Dec 06 05:39:23 crc kubenswrapper[4706]: I1206 05:39:23.570882 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-xpljb" event={"ID":"77c327d8-4531-43a9-991e-f913f7e1d02e","Type":"ContainerStarted","Data":"362082b70a51de7949d4439a0fbfe32b2b450375bb906e95096c0111d7278f8a"} Dec 06 05:39:23 crc kubenswrapper[4706]: I1206 05:39:23.593784 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-5b5b58f5c8-xpljb" podStartSLOduration=1.348128006 podStartE2EDuration="3.593765056s" podCreationTimestamp="2025-12-06 05:39:20 +0000 UTC" firstStartedPulling="2025-12-06 05:39:20.998748027 +0000 UTC m=+1183.326571971" lastFinishedPulling="2025-12-06 05:39:23.244385077 +0000 UTC m=+1185.572209021" observedRunningTime="2025-12-06 05:39:23.587203486 +0000 UTC m=+1185.915027430" watchObservedRunningTime="2025-12-06 05:39:23.593765056 +0000 UTC m=+1185.921589020" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.507741 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-5jtbm"] Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.508771 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-5jtbm" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.510756 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-n2gh8" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.523511 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-r55tp"] Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.524345 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-r55tp" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.526584 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.552792 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-r55tp"] Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.569640 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-l4lvf"] Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.570503 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-l4lvf" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.583439 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/aa5bcff8-fac7-4a00-b7f7-312f70ad11b2-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-r55tp\" (UID: \"aa5bcff8-fac7-4a00-b7f7-312f70ad11b2\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-r55tp" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.585324 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kttbb\" (UniqueName: \"kubernetes.io/projected/aa5bcff8-fac7-4a00-b7f7-312f70ad11b2-kube-api-access-kttbb\") pod \"nmstate-webhook-5f6d4c5ccb-r55tp\" (UID: \"aa5bcff8-fac7-4a00-b7f7-312f70ad11b2\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-r55tp" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.585480 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfhsf\" (UniqueName: \"kubernetes.io/projected/eeea5f87-d6ea-47d3-86aa-4e5ed4562078-kube-api-access-cfhsf\") pod \"nmstate-metrics-7f946cbc9-5jtbm\" (UID: \"eeea5f87-d6ea-47d3-86aa-4e5ed4562078\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-5jtbm" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.591110 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-5jtbm"] Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.686527 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfhsf\" (UniqueName: \"kubernetes.io/projected/eeea5f87-d6ea-47d3-86aa-4e5ed4562078-kube-api-access-cfhsf\") pod \"nmstate-metrics-7f946cbc9-5jtbm\" (UID: \"eeea5f87-d6ea-47d3-86aa-4e5ed4562078\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-5jtbm" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.686811 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/ab718c3d-1427-4fc0-b728-6925fca42caf-ovs-socket\") pod \"nmstate-handler-l4lvf\" (UID: \"ab718c3d-1427-4fc0-b728-6925fca42caf\") " pod="openshift-nmstate/nmstate-handler-l4lvf" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.686944 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lk8c\" (UniqueName: \"kubernetes.io/projected/ab718c3d-1427-4fc0-b728-6925fca42caf-kube-api-access-8lk8c\") pod \"nmstate-handler-l4lvf\" (UID: \"ab718c3d-1427-4fc0-b728-6925fca42caf\") " pod="openshift-nmstate/nmstate-handler-l4lvf" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.687114 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/aa5bcff8-fac7-4a00-b7f7-312f70ad11b2-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-r55tp\" (UID: \"aa5bcff8-fac7-4a00-b7f7-312f70ad11b2\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-r55tp" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.687649 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/ab718c3d-1427-4fc0-b728-6925fca42caf-dbus-socket\") pod \"nmstate-handler-l4lvf\" (UID: \"ab718c3d-1427-4fc0-b728-6925fca42caf\") " 
pod="openshift-nmstate/nmstate-handler-l4lvf" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.687759 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/ab718c3d-1427-4fc0-b728-6925fca42caf-nmstate-lock\") pod \"nmstate-handler-l4lvf\" (UID: \"ab718c3d-1427-4fc0-b728-6925fca42caf\") " pod="openshift-nmstate/nmstate-handler-l4lvf" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.687882 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kttbb\" (UniqueName: \"kubernetes.io/projected/aa5bcff8-fac7-4a00-b7f7-312f70ad11b2-kube-api-access-kttbb\") pod \"nmstate-webhook-5f6d4c5ccb-r55tp\" (UID: \"aa5bcff8-fac7-4a00-b7f7-312f70ad11b2\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-r55tp" Dec 06 05:39:24 crc kubenswrapper[4706]: E1206 05:39:24.687291 4706 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Dec 06 05:39:24 crc kubenswrapper[4706]: E1206 05:39:24.688134 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/aa5bcff8-fac7-4a00-b7f7-312f70ad11b2-tls-key-pair podName:aa5bcff8-fac7-4a00-b7f7-312f70ad11b2 nodeName:}" failed. No retries permitted until 2025-12-06 05:39:25.188109839 +0000 UTC m=+1187.515933784 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/aa5bcff8-fac7-4a00-b7f7-312f70ad11b2-tls-key-pair") pod "nmstate-webhook-5f6d4c5ccb-r55tp" (UID: "aa5bcff8-fac7-4a00-b7f7-312f70ad11b2") : secret "openshift-nmstate-webhook" not found Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.708654 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-8c75x"] Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.709398 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-8c75x" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.722653 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.722721 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-mdjqd" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.725020 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kttbb\" (UniqueName: \"kubernetes.io/projected/aa5bcff8-fac7-4a00-b7f7-312f70ad11b2-kube-api-access-kttbb\") pod \"nmstate-webhook-5f6d4c5ccb-r55tp\" (UID: \"aa5bcff8-fac7-4a00-b7f7-312f70ad11b2\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-r55tp" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.727904 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.730643 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-8c75x"] Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.732019 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfhsf\" (UniqueName: \"kubernetes.io/projected/eeea5f87-d6ea-47d3-86aa-4e5ed4562078-kube-api-access-cfhsf\") pod \"nmstate-metrics-7f946cbc9-5jtbm\" (UID: \"eeea5f87-d6ea-47d3-86aa-4e5ed4562078\") " pod="openshift-nmstate/nmstate-metrics-7f946cbc9-5jtbm" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.788552 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/1d9e9551-a46a-42b6-a9b4-b78a3994239a-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-8c75x\" (UID: \"1d9e9551-a46a-42b6-a9b4-b78a3994239a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-8c75x" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.788601 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/1d9e9551-a46a-42b6-a9b4-b78a3994239a-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-8c75x\" (UID: \"1d9e9551-a46a-42b6-a9b4-b78a3994239a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-8c75x" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.788638 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/ab718c3d-1427-4fc0-b728-6925fca42caf-ovs-socket\") pod \"nmstate-handler-l4lvf\" (UID: \"ab718c3d-1427-4fc0-b728-6925fca42caf\") " pod="openshift-nmstate/nmstate-handler-l4lvf" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.788673 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/ab718c3d-1427-4fc0-b728-6925fca42caf-ovs-socket\") pod \"nmstate-handler-l4lvf\" (UID: \"ab718c3d-1427-4fc0-b728-6925fca42caf\") " pod="openshift-nmstate/nmstate-handler-l4lvf" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.788811 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lk8c\" (UniqueName: \"kubernetes.io/projected/ab718c3d-1427-4fc0-b728-6925fca42caf-kube-api-access-8lk8c\") pod \"nmstate-handler-l4lvf\" (UID: 
\"ab718c3d-1427-4fc0-b728-6925fca42caf\") " pod="openshift-nmstate/nmstate-handler-l4lvf" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.788891 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6bzdv\" (UniqueName: \"kubernetes.io/projected/1d9e9551-a46a-42b6-a9b4-b78a3994239a-kube-api-access-6bzdv\") pod \"nmstate-console-plugin-7fbb5f6569-8c75x\" (UID: \"1d9e9551-a46a-42b6-a9b4-b78a3994239a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-8c75x" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.788987 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/ab718c3d-1427-4fc0-b728-6925fca42caf-dbus-socket\") pod \"nmstate-handler-l4lvf\" (UID: \"ab718c3d-1427-4fc0-b728-6925fca42caf\") " pod="openshift-nmstate/nmstate-handler-l4lvf" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.789059 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/ab718c3d-1427-4fc0-b728-6925fca42caf-nmstate-lock\") pod \"nmstate-handler-l4lvf\" (UID: \"ab718c3d-1427-4fc0-b728-6925fca42caf\") " pod="openshift-nmstate/nmstate-handler-l4lvf" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.789177 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/ab718c3d-1427-4fc0-b728-6925fca42caf-nmstate-lock\") pod \"nmstate-handler-l4lvf\" (UID: \"ab718c3d-1427-4fc0-b728-6925fca42caf\") " pod="openshift-nmstate/nmstate-handler-l4lvf" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.789311 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/ab718c3d-1427-4fc0-b728-6925fca42caf-dbus-socket\") pod \"nmstate-handler-l4lvf\" (UID: \"ab718c3d-1427-4fc0-b728-6925fca42caf\") " pod="openshift-nmstate/nmstate-handler-l4lvf" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.806842 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lk8c\" (UniqueName: \"kubernetes.io/projected/ab718c3d-1427-4fc0-b728-6925fca42caf-kube-api-access-8lk8c\") pod \"nmstate-handler-l4lvf\" (UID: \"ab718c3d-1427-4fc0-b728-6925fca42caf\") " pod="openshift-nmstate/nmstate-handler-l4lvf" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.828612 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-5jtbm" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.888557 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-l4lvf" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.889954 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/1d9e9551-a46a-42b6-a9b4-b78a3994239a-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-8c75x\" (UID: \"1d9e9551-a46a-42b6-a9b4-b78a3994239a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-8c75x" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.890009 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/1d9e9551-a46a-42b6-a9b4-b78a3994239a-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-8c75x\" (UID: \"1d9e9551-a46a-42b6-a9b4-b78a3994239a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-8c75x" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.890098 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6bzdv\" (UniqueName: \"kubernetes.io/projected/1d9e9551-a46a-42b6-a9b4-b78a3994239a-kube-api-access-6bzdv\") pod \"nmstate-console-plugin-7fbb5f6569-8c75x\" (UID: \"1d9e9551-a46a-42b6-a9b4-b78a3994239a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-8c75x" Dec 06 05:39:24 crc kubenswrapper[4706]: E1206 05:39:24.890251 4706 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Dec 06 05:39:24 crc kubenswrapper[4706]: E1206 05:39:24.890334 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1d9e9551-a46a-42b6-a9b4-b78a3994239a-plugin-serving-cert podName:1d9e9551-a46a-42b6-a9b4-b78a3994239a nodeName:}" failed. No retries permitted until 2025-12-06 05:39:25.390316254 +0000 UTC m=+1187.718140198 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/1d9e9551-a46a-42b6-a9b4-b78a3994239a-plugin-serving-cert") pod "nmstate-console-plugin-7fbb5f6569-8c75x" (UID: "1d9e9551-a46a-42b6-a9b4-b78a3994239a") : secret "plugin-serving-cert" not found Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.890963 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/1d9e9551-a46a-42b6-a9b4-b78a3994239a-nginx-conf\") pod \"nmstate-console-plugin-7fbb5f6569-8c75x\" (UID: \"1d9e9551-a46a-42b6-a9b4-b78a3994239a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-8c75x" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.895238 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-695d854bf9-5krdj"] Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.895900 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.914287 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-695d854bf9-5krdj"] Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.925883 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6bzdv\" (UniqueName: \"kubernetes.io/projected/1d9e9551-a46a-42b6-a9b4-b78a3994239a-kube-api-access-6bzdv\") pod \"nmstate-console-plugin-7fbb5f6569-8c75x\" (UID: \"1d9e9551-a46a-42b6-a9b4-b78a3994239a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-8c75x" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.991487 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/def86a44-442d-4e7d-aef8-b7d733f9c61b-service-ca\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.991560 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/def86a44-442d-4e7d-aef8-b7d733f9c61b-oauth-serving-cert\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.991745 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/def86a44-442d-4e7d-aef8-b7d733f9c61b-console-config\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.991787 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/def86a44-442d-4e7d-aef8-b7d733f9c61b-console-oauth-config\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.991807 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/def86a44-442d-4e7d-aef8-b7d733f9c61b-console-serving-cert\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.991834 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/def86a44-442d-4e7d-aef8-b7d733f9c61b-trusted-ca-bundle\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:24 crc kubenswrapper[4706]: I1206 05:39:24.991852 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xdcd5\" (UniqueName: \"kubernetes.io/projected/def86a44-442d-4e7d-aef8-b7d733f9c61b-kube-api-access-xdcd5\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " 
pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.048683 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-7f946cbc9-5jtbm"] Dec 06 05:39:25 crc kubenswrapper[4706]: W1206 05:39:25.052181 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeeea5f87_d6ea_47d3_86aa_4e5ed4562078.slice/crio-84031086a5b66494ba649c8e20665f60f24171b9262c2b776dc161abd68060cc WatchSource:0}: Error finding container 84031086a5b66494ba649c8e20665f60f24171b9262c2b776dc161abd68060cc: Status 404 returned error can't find the container with id 84031086a5b66494ba649c8e20665f60f24171b9262c2b776dc161abd68060cc Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.095204 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/def86a44-442d-4e7d-aef8-b7d733f9c61b-console-config\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.095258 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/def86a44-442d-4e7d-aef8-b7d733f9c61b-console-oauth-config\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.095285 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/def86a44-442d-4e7d-aef8-b7d733f9c61b-console-serving-cert\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.095314 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/def86a44-442d-4e7d-aef8-b7d733f9c61b-trusted-ca-bundle\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.095335 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xdcd5\" (UniqueName: \"kubernetes.io/projected/def86a44-442d-4e7d-aef8-b7d733f9c61b-kube-api-access-xdcd5\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.095370 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/def86a44-442d-4e7d-aef8-b7d733f9c61b-service-ca\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.095458 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/def86a44-442d-4e7d-aef8-b7d733f9c61b-oauth-serving-cert\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:25 crc 
kubenswrapper[4706]: I1206 05:39:25.096436 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/def86a44-442d-4e7d-aef8-b7d733f9c61b-console-config\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.096840 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/def86a44-442d-4e7d-aef8-b7d733f9c61b-trusted-ca-bundle\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.097026 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/def86a44-442d-4e7d-aef8-b7d733f9c61b-service-ca\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.098386 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/def86a44-442d-4e7d-aef8-b7d733f9c61b-oauth-serving-cert\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.101449 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/def86a44-442d-4e7d-aef8-b7d733f9c61b-console-oauth-config\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.101484 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/def86a44-442d-4e7d-aef8-b7d733f9c61b-console-serving-cert\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.109277 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xdcd5\" (UniqueName: \"kubernetes.io/projected/def86a44-442d-4e7d-aef8-b7d733f9c61b-kube-api-access-xdcd5\") pod \"console-695d854bf9-5krdj\" (UID: \"def86a44-442d-4e7d-aef8-b7d733f9c61b\") " pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.196305 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/aa5bcff8-fac7-4a00-b7f7-312f70ad11b2-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-r55tp\" (UID: \"aa5bcff8-fac7-4a00-b7f7-312f70ad11b2\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-r55tp" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.199689 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/aa5bcff8-fac7-4a00-b7f7-312f70ad11b2-tls-key-pair\") pod \"nmstate-webhook-5f6d4c5ccb-r55tp\" (UID: \"aa5bcff8-fac7-4a00-b7f7-312f70ad11b2\") " pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-r55tp" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.210183 4706 util.go:30] 
"No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.392498 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-695d854bf9-5krdj"] Dec 06 05:39:25 crc kubenswrapper[4706]: W1206 05:39:25.399872 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddef86a44_442d_4e7d_aef8_b7d733f9c61b.slice/crio-4014506b0ea6f8dbabac1d6ff7b9c5ce066b5b821f90e345bef5af1864e6123a WatchSource:0}: Error finding container 4014506b0ea6f8dbabac1d6ff7b9c5ce066b5b821f90e345bef5af1864e6123a: Status 404 returned error can't find the container with id 4014506b0ea6f8dbabac1d6ff7b9c5ce066b5b821f90e345bef5af1864e6123a Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.400530 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/1d9e9551-a46a-42b6-a9b4-b78a3994239a-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-8c75x\" (UID: \"1d9e9551-a46a-42b6-a9b4-b78a3994239a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-8c75x" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.405285 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/1d9e9551-a46a-42b6-a9b4-b78a3994239a-plugin-serving-cert\") pod \"nmstate-console-plugin-7fbb5f6569-8c75x\" (UID: \"1d9e9551-a46a-42b6-a9b4-b78a3994239a\") " pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-8c75x" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.453498 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-r55tp" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.583854 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-l4lvf" event={"ID":"ab718c3d-1427-4fc0-b728-6925fca42caf","Type":"ContainerStarted","Data":"9bebb471cc7bed7c24c5f6e393edc0b572f49261cacbf8184bb0764d4a16518b"} Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.592988 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-695d854bf9-5krdj" event={"ID":"def86a44-442d-4e7d-aef8-b7d733f9c61b","Type":"ContainerStarted","Data":"eb7fed13ada16d2a1a13444fb9e4859bbf35acab021698348a2a2a18c57d1f5f"} Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.593067 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-695d854bf9-5krdj" event={"ID":"def86a44-442d-4e7d-aef8-b7d733f9c61b","Type":"ContainerStarted","Data":"4014506b0ea6f8dbabac1d6ff7b9c5ce066b5b821f90e345bef5af1864e6123a"} Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.597547 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-5jtbm" event={"ID":"eeea5f87-d6ea-47d3-86aa-4e5ed4562078","Type":"ContainerStarted","Data":"84031086a5b66494ba649c8e20665f60f24171b9262c2b776dc161abd68060cc"} Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.612164 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-695d854bf9-5krdj" podStartSLOduration=1.612148914 podStartE2EDuration="1.612148914s" podCreationTimestamp="2025-12-06 05:39:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-12-06 05:39:25.611334261 +0000 UTC m=+1187.939158205" watchObservedRunningTime="2025-12-06 05:39:25.612148914 +0000 UTC m=+1187.939972858" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.648196 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-5f6d4c5ccb-r55tp"] Dec 06 05:39:25 crc kubenswrapper[4706]: W1206 05:39:25.651379 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaa5bcff8_fac7_4a00_b7f7_312f70ad11b2.slice/crio-5517cca1e0b7a1956bc81d78c963a17a7625b4add9ca1c200e2e9ce23c81f50b WatchSource:0}: Error finding container 5517cca1e0b7a1956bc81d78c963a17a7625b4add9ca1c200e2e9ce23c81f50b: Status 404 returned error can't find the container with id 5517cca1e0b7a1956bc81d78c963a17a7625b4add9ca1c200e2e9ce23c81f50b Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.658778 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-8c75x" Dec 06 05:39:25 crc kubenswrapper[4706]: I1206 05:39:25.836525 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7fbb5f6569-8c75x"] Dec 06 05:39:25 crc kubenswrapper[4706]: W1206 05:39:25.840220 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d9e9551_a46a_42b6_a9b4_b78a3994239a.slice/crio-da014f5555a52cfcaab4409781739dc0b58027636431e23bd5764c1796d3f741 WatchSource:0}: Error finding container da014f5555a52cfcaab4409781739dc0b58027636431e23bd5764c1796d3f741: Status 404 returned error can't find the container with id da014f5555a52cfcaab4409781739dc0b58027636431e23bd5764c1796d3f741 Dec 06 05:39:26 crc kubenswrapper[4706]: I1206 05:39:26.604785 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-8c75x" event={"ID":"1d9e9551-a46a-42b6-a9b4-b78a3994239a","Type":"ContainerStarted","Data":"da014f5555a52cfcaab4409781739dc0b58027636431e23bd5764c1796d3f741"} Dec 06 05:39:26 crc kubenswrapper[4706]: I1206 05:39:26.607393 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-r55tp" event={"ID":"aa5bcff8-fac7-4a00-b7f7-312f70ad11b2","Type":"ContainerStarted","Data":"5517cca1e0b7a1956bc81d78c963a17a7625b4add9ca1c200e2e9ce23c81f50b"} Dec 06 05:39:29 crc kubenswrapper[4706]: I1206 05:39:29.632878 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-l4lvf" event={"ID":"ab718c3d-1427-4fc0-b728-6925fca42caf","Type":"ContainerStarted","Data":"218ab338479379f0edc1185c1006e41e23e79efba989922bb10f36bf6e5274da"} Dec 06 05:39:29 crc kubenswrapper[4706]: I1206 05:39:29.633512 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-l4lvf" Dec 06 05:39:29 crc kubenswrapper[4706]: I1206 05:39:29.635194 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-8c75x" event={"ID":"1d9e9551-a46a-42b6-a9b4-b78a3994239a","Type":"ContainerStarted","Data":"a50e500dbe995d0e1c993bb09dfe3c1ff3620e292fefcb5943149a0948ee78b1"} Dec 06 05:39:29 crc kubenswrapper[4706]: I1206 05:39:29.637147 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-r55tp" 
event={"ID":"aa5bcff8-fac7-4a00-b7f7-312f70ad11b2","Type":"ContainerStarted","Data":"54f31afb5b6d2f501a7715d77ea581dd45bf3b904c45d5765dc27f1f93dfe7a9"} Dec 06 05:39:29 crc kubenswrapper[4706]: I1206 05:39:29.637314 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-r55tp" Dec 06 05:39:29 crc kubenswrapper[4706]: I1206 05:39:29.650564 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-l4lvf" podStartSLOduration=1.7065966590000001 podStartE2EDuration="5.650548466s" podCreationTimestamp="2025-12-06 05:39:24 +0000 UTC" firstStartedPulling="2025-12-06 05:39:24.952451541 +0000 UTC m=+1187.280275485" lastFinishedPulling="2025-12-06 05:39:28.896403338 +0000 UTC m=+1191.224227292" observedRunningTime="2025-12-06 05:39:29.645148507 +0000 UTC m=+1191.972972471" watchObservedRunningTime="2025-12-06 05:39:29.650548466 +0000 UTC m=+1191.978372410" Dec 06 05:39:29 crc kubenswrapper[4706]: I1206 05:39:29.665454 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-r55tp" podStartSLOduration=2.372281815 podStartE2EDuration="5.665436024s" podCreationTimestamp="2025-12-06 05:39:24 +0000 UTC" firstStartedPulling="2025-12-06 05:39:25.65388534 +0000 UTC m=+1187.981709284" lastFinishedPulling="2025-12-06 05:39:28.947039529 +0000 UTC m=+1191.274863493" observedRunningTime="2025-12-06 05:39:29.660716476 +0000 UTC m=+1191.988540420" watchObservedRunningTime="2025-12-06 05:39:29.665436024 +0000 UTC m=+1191.993259988" Dec 06 05:39:29 crc kubenswrapper[4706]: I1206 05:39:29.680506 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7fbb5f6569-8c75x" podStartSLOduration=2.562940214 podStartE2EDuration="5.680449828s" podCreationTimestamp="2025-12-06 05:39:24 +0000 UTC" firstStartedPulling="2025-12-06 05:39:25.841821223 +0000 UTC m=+1188.169645167" lastFinishedPulling="2025-12-06 05:39:28.959330837 +0000 UTC m=+1191.287154781" observedRunningTime="2025-12-06 05:39:29.674266008 +0000 UTC m=+1192.002089972" watchObservedRunningTime="2025-12-06 05:39:29.680449828 +0000 UTC m=+1192.008273782" Dec 06 05:39:30 crc kubenswrapper[4706]: I1206 05:39:30.643186 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-5jtbm" event={"ID":"eeea5f87-d6ea-47d3-86aa-4e5ed4562078","Type":"ContainerStarted","Data":"0475c0fb2a94bafc4047825e88d0a4ac86fd2bb78480089795d5c686670bc815"} Dec 06 05:39:32 crc kubenswrapper[4706]: I1206 05:39:32.656270 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-5jtbm" event={"ID":"eeea5f87-d6ea-47d3-86aa-4e5ed4562078","Type":"ContainerStarted","Data":"a76f2420c71af8c17c467470436c2dcb8349ad1731c80fadb658ef02df0f05ec"} Dec 06 05:39:32 crc kubenswrapper[4706]: I1206 05:39:32.681092 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-7f946cbc9-5jtbm" podStartSLOduration=2.013111789 podStartE2EDuration="8.681030809s" podCreationTimestamp="2025-12-06 05:39:24 +0000 UTC" firstStartedPulling="2025-12-06 05:39:25.054643258 +0000 UTC m=+1187.382467202" lastFinishedPulling="2025-12-06 05:39:31.722562278 +0000 UTC m=+1194.050386222" observedRunningTime="2025-12-06 05:39:32.67637351 +0000 UTC m=+1195.004197524" watchObservedRunningTime="2025-12-06 05:39:32.681030809 +0000 UTC m=+1195.008854823" Dec 06 
05:39:34 crc kubenswrapper[4706]: I1206 05:39:34.925873 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-l4lvf" Dec 06 05:39:35 crc kubenswrapper[4706]: I1206 05:39:35.211084 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:35 crc kubenswrapper[4706]: I1206 05:39:35.211176 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:35 crc kubenswrapper[4706]: I1206 05:39:35.216174 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:35 crc kubenswrapper[4706]: I1206 05:39:35.680562 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-695d854bf9-5krdj" Dec 06 05:39:35 crc kubenswrapper[4706]: I1206 05:39:35.727329 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-t4xd8"] Dec 06 05:39:45 crc kubenswrapper[4706]: I1206 05:39:45.461213 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-5f6d4c5ccb-r55tp" Dec 06 05:39:58 crc kubenswrapper[4706]: I1206 05:39:58.620805 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6"] Dec 06 05:39:58 crc kubenswrapper[4706]: I1206 05:39:58.622820 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6" Dec 06 05:39:58 crc kubenswrapper[4706]: I1206 05:39:58.624750 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Dec 06 05:39:58 crc kubenswrapper[4706]: I1206 05:39:58.633370 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6"] Dec 06 05:39:58 crc kubenswrapper[4706]: I1206 05:39:58.786713 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5g2mr\" (UniqueName: \"kubernetes.io/projected/09b72ef4-066a-4aea-ad04-27d8bca291b8-kube-api-access-5g2mr\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6\" (UID: \"09b72ef4-066a-4aea-ad04-27d8bca291b8\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6" Dec 06 05:39:58 crc kubenswrapper[4706]: I1206 05:39:58.786808 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/09b72ef4-066a-4aea-ad04-27d8bca291b8-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6\" (UID: \"09b72ef4-066a-4aea-ad04-27d8bca291b8\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6" Dec 06 05:39:58 crc kubenswrapper[4706]: I1206 05:39:58.786893 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/09b72ef4-066a-4aea-ad04-27d8bca291b8-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6\" (UID: \"09b72ef4-066a-4aea-ad04-27d8bca291b8\") " 
pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6" Dec 06 05:39:58 crc kubenswrapper[4706]: I1206 05:39:58.887787 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/09b72ef4-066a-4aea-ad04-27d8bca291b8-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6\" (UID: \"09b72ef4-066a-4aea-ad04-27d8bca291b8\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6" Dec 06 05:39:58 crc kubenswrapper[4706]: I1206 05:39:58.888160 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/09b72ef4-066a-4aea-ad04-27d8bca291b8-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6\" (UID: \"09b72ef4-066a-4aea-ad04-27d8bca291b8\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6" Dec 06 05:39:58 crc kubenswrapper[4706]: I1206 05:39:58.888266 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5g2mr\" (UniqueName: \"kubernetes.io/projected/09b72ef4-066a-4aea-ad04-27d8bca291b8-kube-api-access-5g2mr\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6\" (UID: \"09b72ef4-066a-4aea-ad04-27d8bca291b8\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6" Dec 06 05:39:58 crc kubenswrapper[4706]: I1206 05:39:58.888577 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/09b72ef4-066a-4aea-ad04-27d8bca291b8-util\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6\" (UID: \"09b72ef4-066a-4aea-ad04-27d8bca291b8\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6" Dec 06 05:39:58 crc kubenswrapper[4706]: I1206 05:39:58.888815 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/09b72ef4-066a-4aea-ad04-27d8bca291b8-bundle\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6\" (UID: \"09b72ef4-066a-4aea-ad04-27d8bca291b8\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6" Dec 06 05:39:58 crc kubenswrapper[4706]: I1206 05:39:58.910331 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5g2mr\" (UniqueName: \"kubernetes.io/projected/09b72ef4-066a-4aea-ad04-27d8bca291b8-kube-api-access-5g2mr\") pod \"af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6\" (UID: \"09b72ef4-066a-4aea-ad04-27d8bca291b8\") " pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6" Dec 06 05:39:58 crc kubenswrapper[4706]: I1206 05:39:58.993074 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6" Dec 06 05:39:59 crc kubenswrapper[4706]: I1206 05:39:59.443411 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6"] Dec 06 05:39:59 crc kubenswrapper[4706]: I1206 05:39:59.849996 4706 generic.go:334] "Generic (PLEG): container finished" podID="09b72ef4-066a-4aea-ad04-27d8bca291b8" containerID="83e37288f7c1c2391ff258095f561acc2d74e11d53c830411112ade1d5637b2e" exitCode=0 Dec 06 05:39:59 crc kubenswrapper[4706]: I1206 05:39:59.850072 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6" event={"ID":"09b72ef4-066a-4aea-ad04-27d8bca291b8","Type":"ContainerDied","Data":"83e37288f7c1c2391ff258095f561acc2d74e11d53c830411112ade1d5637b2e"} Dec 06 05:39:59 crc kubenswrapper[4706]: I1206 05:39:59.850301 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6" event={"ID":"09b72ef4-066a-4aea-ad04-27d8bca291b8","Type":"ContainerStarted","Data":"cc4fd716266a075707d6f78dbcbb6267033d59ec4de03c1638fbcc2ffb455d0d"} Dec 06 05:40:00 crc kubenswrapper[4706]: I1206 05:40:00.783123 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-t4xd8" podUID="ed24741b-5476-4f20-bd17-4c8686d40419" containerName="console" containerID="cri-o://5ff09d37f29d70a08c9506117f34107ef1fe0dffa353358e0ca12f6f7dd35fcb" gracePeriod=15 Dec 06 05:40:02 crc kubenswrapper[4706]: I1206 05:40:02.870641 4706 generic.go:334] "Generic (PLEG): container finished" podID="09b72ef4-066a-4aea-ad04-27d8bca291b8" containerID="3e3024f394ee2d95b80b3511d1c5ebfcdc1cb114f3b2745835a66df01a3b821f" exitCode=0 Dec 06 05:40:02 crc kubenswrapper[4706]: I1206 05:40:02.870702 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6" event={"ID":"09b72ef4-066a-4aea-ad04-27d8bca291b8","Type":"ContainerDied","Data":"3e3024f394ee2d95b80b3511d1c5ebfcdc1cb114f3b2745835a66df01a3b821f"} Dec 06 05:40:02 crc kubenswrapper[4706]: I1206 05:40:02.875281 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-t4xd8_ed24741b-5476-4f20-bd17-4c8686d40419/console/0.log" Dec 06 05:40:02 crc kubenswrapper[4706]: I1206 05:40:02.875321 4706 generic.go:334] "Generic (PLEG): container finished" podID="ed24741b-5476-4f20-bd17-4c8686d40419" containerID="5ff09d37f29d70a08c9506117f34107ef1fe0dffa353358e0ca12f6f7dd35fcb" exitCode=2 Dec 06 05:40:02 crc kubenswrapper[4706]: I1206 05:40:02.875347 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-t4xd8" event={"ID":"ed24741b-5476-4f20-bd17-4c8686d40419","Type":"ContainerDied","Data":"5ff09d37f29d70a08c9506117f34107ef1fe0dffa353358e0ca12f6f7dd35fcb"} Dec 06 05:40:02 crc kubenswrapper[4706]: I1206 05:40:02.992822 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-t4xd8_ed24741b-5476-4f20-bd17-4c8686d40419/console/0.log" Dec 06 05:40:02 crc kubenswrapper[4706]: I1206 05:40:02.992896 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.065449 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-trusted-ca-bundle\") pod \"ed24741b-5476-4f20-bd17-4c8686d40419\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.065630 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ed24741b-5476-4f20-bd17-4c8686d40419-console-oauth-config\") pod \"ed24741b-5476-4f20-bd17-4c8686d40419\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.065713 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-service-ca\") pod \"ed24741b-5476-4f20-bd17-4c8686d40419\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.065761 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nlj9g\" (UniqueName: \"kubernetes.io/projected/ed24741b-5476-4f20-bd17-4c8686d40419-kube-api-access-nlj9g\") pod \"ed24741b-5476-4f20-bd17-4c8686d40419\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.065805 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ed24741b-5476-4f20-bd17-4c8686d40419-console-serving-cert\") pod \"ed24741b-5476-4f20-bd17-4c8686d40419\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.065824 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-oauth-serving-cert\") pod \"ed24741b-5476-4f20-bd17-4c8686d40419\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.065903 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-console-config\") pod \"ed24741b-5476-4f20-bd17-4c8686d40419\" (UID: \"ed24741b-5476-4f20-bd17-4c8686d40419\") " Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.066659 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "ed24741b-5476-4f20-bd17-4c8686d40419" (UID: "ed24741b-5476-4f20-bd17-4c8686d40419"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.067025 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-service-ca" (OuterVolumeSpecName: "service-ca") pod "ed24741b-5476-4f20-bd17-4c8686d40419" (UID: "ed24741b-5476-4f20-bd17-4c8686d40419"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.067281 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "ed24741b-5476-4f20-bd17-4c8686d40419" (UID: "ed24741b-5476-4f20-bd17-4c8686d40419"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.067397 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-console-config" (OuterVolumeSpecName: "console-config") pod "ed24741b-5476-4f20-bd17-4c8686d40419" (UID: "ed24741b-5476-4f20-bd17-4c8686d40419"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.072329 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed24741b-5476-4f20-bd17-4c8686d40419-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "ed24741b-5476-4f20-bd17-4c8686d40419" (UID: "ed24741b-5476-4f20-bd17-4c8686d40419"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.073039 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed24741b-5476-4f20-bd17-4c8686d40419-kube-api-access-nlj9g" (OuterVolumeSpecName: "kube-api-access-nlj9g") pod "ed24741b-5476-4f20-bd17-4c8686d40419" (UID: "ed24741b-5476-4f20-bd17-4c8686d40419"). InnerVolumeSpecName "kube-api-access-nlj9g". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.074610 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed24741b-5476-4f20-bd17-4c8686d40419-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "ed24741b-5476-4f20-bd17-4c8686d40419" (UID: "ed24741b-5476-4f20-bd17-4c8686d40419"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.167345 4706 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ed24741b-5476-4f20-bd17-4c8686d40419-console-oauth-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.167374 4706 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-service-ca\") on node \"crc\" DevicePath \"\"" Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.167385 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nlj9g\" (UniqueName: \"kubernetes.io/projected/ed24741b-5476-4f20-bd17-4c8686d40419-kube-api-access-nlj9g\") on node \"crc\" DevicePath \"\"" Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.167396 4706 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ed24741b-5476-4f20-bd17-4c8686d40419-console-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.167406 4706 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.167414 4706 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-console-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.167422 4706 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ed24741b-5476-4f20-bd17-4c8686d40419-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.896940 4706 generic.go:334] "Generic (PLEG): container finished" podID="09b72ef4-066a-4aea-ad04-27d8bca291b8" containerID="b821dcd3bc8ebee2b7d259043383522125ac401b3479203c2767c1719b26fd38" exitCode=0 Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.897276 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6" event={"ID":"09b72ef4-066a-4aea-ad04-27d8bca291b8","Type":"ContainerDied","Data":"b821dcd3bc8ebee2b7d259043383522125ac401b3479203c2767c1719b26fd38"} Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.901357 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-t4xd8_ed24741b-5476-4f20-bd17-4c8686d40419/console/0.log" Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.901414 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-t4xd8" event={"ID":"ed24741b-5476-4f20-bd17-4c8686d40419","Type":"ContainerDied","Data":"4c4a31b8b66cea562439ba1bcf44978e8a9aac0cbb2a042175bfab8e9b0f43d6"} Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.901452 4706 scope.go:117] "RemoveContainer" containerID="5ff09d37f29d70a08c9506117f34107ef1fe0dffa353358e0ca12f6f7dd35fcb" Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.901528 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-t4xd8" Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.942543 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-t4xd8"] Dec 06 05:40:03 crc kubenswrapper[4706]: I1206 05:40:03.946887 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-t4xd8"] Dec 06 05:40:04 crc kubenswrapper[4706]: I1206 05:40:04.048136 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed24741b-5476-4f20-bd17-4c8686d40419" path="/var/lib/kubelet/pods/ed24741b-5476-4f20-bd17-4c8686d40419/volumes" Dec 06 05:40:05 crc kubenswrapper[4706]: I1206 05:40:05.277387 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6" Dec 06 05:40:05 crc kubenswrapper[4706]: I1206 05:40:05.297488 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5g2mr\" (UniqueName: \"kubernetes.io/projected/09b72ef4-066a-4aea-ad04-27d8bca291b8-kube-api-access-5g2mr\") pod \"09b72ef4-066a-4aea-ad04-27d8bca291b8\" (UID: \"09b72ef4-066a-4aea-ad04-27d8bca291b8\") " Dec 06 05:40:05 crc kubenswrapper[4706]: I1206 05:40:05.297591 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/09b72ef4-066a-4aea-ad04-27d8bca291b8-bundle\") pod \"09b72ef4-066a-4aea-ad04-27d8bca291b8\" (UID: \"09b72ef4-066a-4aea-ad04-27d8bca291b8\") " Dec 06 05:40:05 crc kubenswrapper[4706]: I1206 05:40:05.297740 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/09b72ef4-066a-4aea-ad04-27d8bca291b8-util\") pod \"09b72ef4-066a-4aea-ad04-27d8bca291b8\" (UID: \"09b72ef4-066a-4aea-ad04-27d8bca291b8\") " Dec 06 05:40:05 crc kubenswrapper[4706]: I1206 05:40:05.299214 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09b72ef4-066a-4aea-ad04-27d8bca291b8-bundle" (OuterVolumeSpecName: "bundle") pod "09b72ef4-066a-4aea-ad04-27d8bca291b8" (UID: "09b72ef4-066a-4aea-ad04-27d8bca291b8"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:40:05 crc kubenswrapper[4706]: I1206 05:40:05.307837 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09b72ef4-066a-4aea-ad04-27d8bca291b8-kube-api-access-5g2mr" (OuterVolumeSpecName: "kube-api-access-5g2mr") pod "09b72ef4-066a-4aea-ad04-27d8bca291b8" (UID: "09b72ef4-066a-4aea-ad04-27d8bca291b8"). InnerVolumeSpecName "kube-api-access-5g2mr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:40:05 crc kubenswrapper[4706]: I1206 05:40:05.309919 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09b72ef4-066a-4aea-ad04-27d8bca291b8-util" (OuterVolumeSpecName: "util") pod "09b72ef4-066a-4aea-ad04-27d8bca291b8" (UID: "09b72ef4-066a-4aea-ad04-27d8bca291b8"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:40:05 crc kubenswrapper[4706]: I1206 05:40:05.398980 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5g2mr\" (UniqueName: \"kubernetes.io/projected/09b72ef4-066a-4aea-ad04-27d8bca291b8-kube-api-access-5g2mr\") on node \"crc\" DevicePath \"\"" Dec 06 05:40:05 crc kubenswrapper[4706]: I1206 05:40:05.399019 4706 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/09b72ef4-066a-4aea-ad04-27d8bca291b8-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:40:05 crc kubenswrapper[4706]: I1206 05:40:05.399030 4706 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/09b72ef4-066a-4aea-ad04-27d8bca291b8-util\") on node \"crc\" DevicePath \"\"" Dec 06 05:40:05 crc kubenswrapper[4706]: I1206 05:40:05.927712 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6" event={"ID":"09b72ef4-066a-4aea-ad04-27d8bca291b8","Type":"ContainerDied","Data":"cc4fd716266a075707d6f78dbcbb6267033d59ec4de03c1638fbcc2ffb455d0d"} Dec 06 05:40:05 crc kubenswrapper[4706]: I1206 05:40:05.927760 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cc4fd716266a075707d6f78dbcbb6267033d59ec4de03c1638fbcc2ffb455d0d" Dec 06 05:40:05 crc kubenswrapper[4706]: I1206 05:40:05.927940 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6" Dec 06 05:40:05 crc kubenswrapper[4706]: I1206 05:40:05.961360 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:40:05 crc kubenswrapper[4706]: I1206 05:40:05.961548 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.569383 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-67f666fcfb-5vg8w"] Dec 06 05:40:13 crc kubenswrapper[4706]: E1206 05:40:13.570138 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09b72ef4-066a-4aea-ad04-27d8bca291b8" containerName="extract" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.570151 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="09b72ef4-066a-4aea-ad04-27d8bca291b8" containerName="extract" Dec 06 05:40:13 crc kubenswrapper[4706]: E1206 05:40:13.570168 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed24741b-5476-4f20-bd17-4c8686d40419" containerName="console" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.570174 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed24741b-5476-4f20-bd17-4c8686d40419" containerName="console" Dec 06 05:40:13 crc kubenswrapper[4706]: E1206 05:40:13.570180 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09b72ef4-066a-4aea-ad04-27d8bca291b8" containerName="util" Dec 06 
05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.570187 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="09b72ef4-066a-4aea-ad04-27d8bca291b8" containerName="util" Dec 06 05:40:13 crc kubenswrapper[4706]: E1206 05:40:13.570194 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09b72ef4-066a-4aea-ad04-27d8bca291b8" containerName="pull" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.570201 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="09b72ef4-066a-4aea-ad04-27d8bca291b8" containerName="pull" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.570293 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="09b72ef4-066a-4aea-ad04-27d8bca291b8" containerName="extract" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.570306 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed24741b-5476-4f20-bd17-4c8686d40419" containerName="console" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.570702 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-67f666fcfb-5vg8w" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.572992 4706 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.577472 4706 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-j59ts" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.577536 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.577612 4706 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.578335 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.597529 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-67f666fcfb-5vg8w"] Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.601862 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/86959832-935a-46cc-85bc-f0b9b39340a7-webhook-cert\") pod \"metallb-operator-controller-manager-67f666fcfb-5vg8w\" (UID: \"86959832-935a-46cc-85bc-f0b9b39340a7\") " pod="metallb-system/metallb-operator-controller-manager-67f666fcfb-5vg8w" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.601934 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/86959832-935a-46cc-85bc-f0b9b39340a7-apiservice-cert\") pod \"metallb-operator-controller-manager-67f666fcfb-5vg8w\" (UID: \"86959832-935a-46cc-85bc-f0b9b39340a7\") " pod="metallb-system/metallb-operator-controller-manager-67f666fcfb-5vg8w" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.602266 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpwxj\" (UniqueName: \"kubernetes.io/projected/86959832-935a-46cc-85bc-f0b9b39340a7-kube-api-access-zpwxj\") pod \"metallb-operator-controller-manager-67f666fcfb-5vg8w\" (UID: 
\"86959832-935a-46cc-85bc-f0b9b39340a7\") " pod="metallb-system/metallb-operator-controller-manager-67f666fcfb-5vg8w" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.703899 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpwxj\" (UniqueName: \"kubernetes.io/projected/86959832-935a-46cc-85bc-f0b9b39340a7-kube-api-access-zpwxj\") pod \"metallb-operator-controller-manager-67f666fcfb-5vg8w\" (UID: \"86959832-935a-46cc-85bc-f0b9b39340a7\") " pod="metallb-system/metallb-operator-controller-manager-67f666fcfb-5vg8w" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.703978 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/86959832-935a-46cc-85bc-f0b9b39340a7-webhook-cert\") pod \"metallb-operator-controller-manager-67f666fcfb-5vg8w\" (UID: \"86959832-935a-46cc-85bc-f0b9b39340a7\") " pod="metallb-system/metallb-operator-controller-manager-67f666fcfb-5vg8w" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.704021 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/86959832-935a-46cc-85bc-f0b9b39340a7-apiservice-cert\") pod \"metallb-operator-controller-manager-67f666fcfb-5vg8w\" (UID: \"86959832-935a-46cc-85bc-f0b9b39340a7\") " pod="metallb-system/metallb-operator-controller-manager-67f666fcfb-5vg8w" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.714924 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-d44d656bf-lksks"] Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.715815 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-d44d656bf-lksks" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.724446 4706 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.724738 4706 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-jsvdk" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.724919 4706 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.726205 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/86959832-935a-46cc-85bc-f0b9b39340a7-apiservice-cert\") pod \"metallb-operator-controller-manager-67f666fcfb-5vg8w\" (UID: \"86959832-935a-46cc-85bc-f0b9b39340a7\") " pod="metallb-system/metallb-operator-controller-manager-67f666fcfb-5vg8w" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.726986 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zpwxj\" (UniqueName: \"kubernetes.io/projected/86959832-935a-46cc-85bc-f0b9b39340a7-kube-api-access-zpwxj\") pod \"metallb-operator-controller-manager-67f666fcfb-5vg8w\" (UID: \"86959832-935a-46cc-85bc-f0b9b39340a7\") " pod="metallb-system/metallb-operator-controller-manager-67f666fcfb-5vg8w" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.727513 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/86959832-935a-46cc-85bc-f0b9b39340a7-webhook-cert\") pod 
\"metallb-operator-controller-manager-67f666fcfb-5vg8w\" (UID: \"86959832-935a-46cc-85bc-f0b9b39340a7\") " pod="metallb-system/metallb-operator-controller-manager-67f666fcfb-5vg8w" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.735017 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-d44d656bf-lksks"] Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.805827 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cwwgd\" (UniqueName: \"kubernetes.io/projected/2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726-kube-api-access-cwwgd\") pod \"metallb-operator-webhook-server-d44d656bf-lksks\" (UID: \"2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726\") " pod="metallb-system/metallb-operator-webhook-server-d44d656bf-lksks" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.805899 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726-apiservice-cert\") pod \"metallb-operator-webhook-server-d44d656bf-lksks\" (UID: \"2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726\") " pod="metallb-system/metallb-operator-webhook-server-d44d656bf-lksks" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.806236 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726-webhook-cert\") pod \"metallb-operator-webhook-server-d44d656bf-lksks\" (UID: \"2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726\") " pod="metallb-system/metallb-operator-webhook-server-d44d656bf-lksks" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.889279 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-67f666fcfb-5vg8w" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.911883 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726-webhook-cert\") pod \"metallb-operator-webhook-server-d44d656bf-lksks\" (UID: \"2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726\") " pod="metallb-system/metallb-operator-webhook-server-d44d656bf-lksks" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.911966 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cwwgd\" (UniqueName: \"kubernetes.io/projected/2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726-kube-api-access-cwwgd\") pod \"metallb-operator-webhook-server-d44d656bf-lksks\" (UID: \"2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726\") " pod="metallb-system/metallb-operator-webhook-server-d44d656bf-lksks" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.912001 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726-apiservice-cert\") pod \"metallb-operator-webhook-server-d44d656bf-lksks\" (UID: \"2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726\") " pod="metallb-system/metallb-operator-webhook-server-d44d656bf-lksks" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.916617 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726-apiservice-cert\") pod \"metallb-operator-webhook-server-d44d656bf-lksks\" (UID: \"2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726\") " pod="metallb-system/metallb-operator-webhook-server-d44d656bf-lksks" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.923207 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726-webhook-cert\") pod \"metallb-operator-webhook-server-d44d656bf-lksks\" (UID: \"2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726\") " pod="metallb-system/metallb-operator-webhook-server-d44d656bf-lksks" Dec 06 05:40:13 crc kubenswrapper[4706]: I1206 05:40:13.938010 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cwwgd\" (UniqueName: \"kubernetes.io/projected/2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726-kube-api-access-cwwgd\") pod \"metallb-operator-webhook-server-d44d656bf-lksks\" (UID: \"2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726\") " pod="metallb-system/metallb-operator-webhook-server-d44d656bf-lksks" Dec 06 05:40:14 crc kubenswrapper[4706]: I1206 05:40:14.073738 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-d44d656bf-lksks"
Dec 06 05:40:14 crc kubenswrapper[4706]: I1206 05:40:14.255630 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-67f666fcfb-5vg8w"]
Dec 06 05:40:14 crc kubenswrapper[4706]: I1206 05:40:14.363025 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-d44d656bf-lksks"]
Dec 06 05:40:14 crc kubenswrapper[4706]: W1206 05:40:14.364863 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2f5bd7cc_4de4_4ff3_8c7a_5aeb79fd9726.slice/crio-3e88768e3ace9fe0f002113b87bf7db4f7cbcd7e92756f169ee58cb5bb2aaf8f WatchSource:0}: Error finding container 3e88768e3ace9fe0f002113b87bf7db4f7cbcd7e92756f169ee58cb5bb2aaf8f: Status 404 returned error can't find the container with id 3e88768e3ace9fe0f002113b87bf7db4f7cbcd7e92756f169ee58cb5bb2aaf8f
Dec 06 05:40:14 crc kubenswrapper[4706]: I1206 05:40:14.983083 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-67f666fcfb-5vg8w" event={"ID":"86959832-935a-46cc-85bc-f0b9b39340a7","Type":"ContainerStarted","Data":"1565470fe5393565a79bc41b0785f7148fdc7aaf2b098286adcd25a09158c419"}
Dec 06 05:40:14 crc kubenswrapper[4706]: I1206 05:40:14.984168 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-d44d656bf-lksks" event={"ID":"2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726","Type":"ContainerStarted","Data":"3e88768e3ace9fe0f002113b87bf7db4f7cbcd7e92756f169ee58cb5bb2aaf8f"}
Dec 06 05:40:21 crc kubenswrapper[4706]: I1206 05:40:21.016102 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-d44d656bf-lksks" event={"ID":"2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726","Type":"ContainerStarted","Data":"0dc9c1618666deaffbea799d2963fc8ac2b0584d8474a92df49dfc26a56a81f7"}
Dec 06 05:40:21 crc kubenswrapper[4706]: I1206 05:40:21.016548 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-d44d656bf-lksks"
Dec 06 05:40:21 crc kubenswrapper[4706]: I1206 05:40:21.017407 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-67f666fcfb-5vg8w" event={"ID":"86959832-935a-46cc-85bc-f0b9b39340a7","Type":"ContainerStarted","Data":"d1050865f0a2f113327906ab4a260cf1d19ae60e9c2ddbc7ae9d27f2634e7bae"}
Dec 06 05:40:21 crc kubenswrapper[4706]: I1206 05:40:21.017567 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-67f666fcfb-5vg8w"
Dec 06 05:40:21 crc kubenswrapper[4706]: I1206 05:40:21.049974 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-d44d656bf-lksks" podStartSLOduration=2.164662611 podStartE2EDuration="8.049927343s" podCreationTimestamp="2025-12-06 05:40:13 +0000 UTC" firstStartedPulling="2025-12-06 05:40:14.368667926 +0000 UTC m=+1236.696491870" lastFinishedPulling="2025-12-06 05:40:20.253932658 +0000 UTC m=+1242.581756602" observedRunningTime="2025-12-06 05:40:21.035411165 +0000 UTC m=+1243.363235109" watchObservedRunningTime="2025-12-06 05:40:21.049927343 +0000 UTC m=+1243.377751317"
Dec 06 05:40:21 crc kubenswrapper[4706]: I1206 05:40:21.083747 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-67f666fcfb-5vg8w" podStartSLOduration=2.120272215 podStartE2EDuration="8.08371764s" podCreationTimestamp="2025-12-06 05:40:13 +0000 UTC" firstStartedPulling="2025-12-06 05:40:14.269112206 +0000 UTC m=+1236.596936150" lastFinishedPulling="2025-12-06 05:40:20.232557631 +0000 UTC m=+1242.560381575" observedRunningTime="2025-12-06 05:40:21.079042251 +0000 UTC m=+1243.406866195" watchObservedRunningTime="2025-12-06 05:40:21.08371764 +0000 UTC m=+1243.411541604"
Dec 06 05:40:34 crc kubenswrapper[4706]: I1206 05:40:34.079734 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-d44d656bf-lksks"
Dec 06 05:40:35 crc kubenswrapper[4706]: I1206 05:40:35.962281 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 06 05:40:35 crc kubenswrapper[4706]: I1206 05:40:35.963086 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 06 05:40:53 crc kubenswrapper[4706]: I1206 05:40:53.891687 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-67f666fcfb-5vg8w"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.647799 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-hwzgm"]
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.648832 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-hwzgm"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.651351 4706 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.651587 4706 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-2qjw2"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.660084 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-84c87"]
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.662558 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.665552 4706 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.665640 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.669888 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-hwzgm"]
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.726417 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-snzn5"]
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.727364 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-snzn5"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.729127 4706 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-wdbgb"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.729615 4706 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.729876 4706 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.730033 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.761576 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-f8648f98b-5gg6v"]
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.762424 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-5gg6v"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.767337 4706 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.780349 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-5gg6v"]
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.789634 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-metrics\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.789689 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-frr-startup\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.789712 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-reloader\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.789736 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-frr-sockets\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.789755 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-frr-conf\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.789799 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q444m\" (UniqueName: \"kubernetes.io/projected/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-kube-api-access-q444m\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.789832 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8bmr\" (UniqueName: \"kubernetes.io/projected/90735168-5b70-4282-9d00-6ca91facf758-kube-api-access-v8bmr\") pod \"frr-k8s-webhook-server-7fcb986d4-hwzgm\" (UID: \"90735168-5b70-4282-9d00-6ca91facf758\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-hwzgm"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.789854 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-metrics-certs\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.789877 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/90735168-5b70-4282-9d00-6ca91facf758-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-hwzgm\" (UID: \"90735168-5b70-4282-9d00-6ca91facf758\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-hwzgm"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.891370 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-frr-sockets\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.891407 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-frr-conf\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.891436 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7009f978-2926-401b-bb27-4378dac2d69a-cert\") pod \"controller-f8648f98b-5gg6v\" (UID: \"7009f978-2926-401b-bb27-4378dac2d69a\") " pod="metallb-system/controller-f8648f98b-5gg6v"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.891455 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7009f978-2926-401b-bb27-4378dac2d69a-metrics-certs\") pod \"controller-f8648f98b-5gg6v\" (UID: \"7009f978-2926-401b-bb27-4378dac2d69a\") " pod="metallb-system/controller-f8648f98b-5gg6v"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.891472 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktrdp\" (UniqueName: \"kubernetes.io/projected/7009f978-2926-401b-bb27-4378dac2d69a-kube-api-access-ktrdp\") pod \"controller-f8648f98b-5gg6v\" (UID: \"7009f978-2926-401b-bb27-4378dac2d69a\") " pod="metallb-system/controller-f8648f98b-5gg6v"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.891493 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sxnwn\" (UniqueName: \"kubernetes.io/projected/6d8b765c-bd65-44fb-a959-b458e0c531a4-kube-api-access-sxnwn\") pod \"speaker-snzn5\" (UID: \"6d8b765c-bd65-44fb-a959-b458e0c531a4\") " pod="metallb-system/speaker-snzn5"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.891510 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/6d8b765c-bd65-44fb-a959-b458e0c531a4-metallb-excludel2\") pod \"speaker-snzn5\" (UID: \"6d8b765c-bd65-44fb-a959-b458e0c531a4\") " pod="metallb-system/speaker-snzn5"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.891528 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q444m\" (UniqueName: \"kubernetes.io/projected/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-kube-api-access-q444m\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.891546 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8bmr\" (UniqueName: \"kubernetes.io/projected/90735168-5b70-4282-9d00-6ca91facf758-kube-api-access-v8bmr\") pod \"frr-k8s-webhook-server-7fcb986d4-hwzgm\" (UID: \"90735168-5b70-4282-9d00-6ca91facf758\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-hwzgm"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.891567 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-metrics-certs\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.891586 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/90735168-5b70-4282-9d00-6ca91facf758-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-hwzgm\" (UID: \"90735168-5b70-4282-9d00-6ca91facf758\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-hwzgm"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.891618 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-metrics\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.891640 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6d8b765c-bd65-44fb-a959-b458e0c531a4-metrics-certs\") pod \"speaker-snzn5\" (UID: \"6d8b765c-bd65-44fb-a959-b458e0c531a4\") " pod="metallb-system/speaker-snzn5"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.891662 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-frr-startup\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.891676 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/6d8b765c-bd65-44fb-a959-b458e0c531a4-memberlist\") pod \"speaker-snzn5\" (UID: \"6d8b765c-bd65-44fb-a959-b458e0c531a4\") " pod="metallb-system/speaker-snzn5"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.891694 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-reloader\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.891824 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-frr-sockets\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.891998 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-reloader\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.892273 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-frr-conf\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.893201 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-metrics\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.898088 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-frr-startup\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.902815 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-metrics-certs\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.902971 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/90735168-5b70-4282-9d00-6ca91facf758-cert\") pod \"frr-k8s-webhook-server-7fcb986d4-hwzgm\" (UID: \"90735168-5b70-4282-9d00-6ca91facf758\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-hwzgm"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.906577 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q444m\" (UniqueName: \"kubernetes.io/projected/b7f21e0e-b99e-4c3a-9a01-6016f5e3542f-kube-api-access-q444m\") pod \"frr-k8s-84c87\" (UID: \"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f\") " pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.906654 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8bmr\" (UniqueName: \"kubernetes.io/projected/90735168-5b70-4282-9d00-6ca91facf758-kube-api-access-v8bmr\") pod \"frr-k8s-webhook-server-7fcb986d4-hwzgm\" (UID: \"90735168-5b70-4282-9d00-6ca91facf758\") " pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-hwzgm"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.965704 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-hwzgm"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.985434 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-84c87"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.992808 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/6d8b765c-bd65-44fb-a959-b458e0c531a4-memberlist\") pod \"speaker-snzn5\" (UID: \"6d8b765c-bd65-44fb-a959-b458e0c531a4\") " pod="metallb-system/speaker-snzn5"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.993110 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7009f978-2926-401b-bb27-4378dac2d69a-cert\") pod \"controller-f8648f98b-5gg6v\" (UID: \"7009f978-2926-401b-bb27-4378dac2d69a\") " pod="metallb-system/controller-f8648f98b-5gg6v"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.993260 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7009f978-2926-401b-bb27-4378dac2d69a-metrics-certs\") pod \"controller-f8648f98b-5gg6v\" (UID: \"7009f978-2926-401b-bb27-4378dac2d69a\") " pod="metallb-system/controller-f8648f98b-5gg6v"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.993403 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktrdp\" (UniqueName: \"kubernetes.io/projected/7009f978-2926-401b-bb27-4378dac2d69a-kube-api-access-ktrdp\") pod \"controller-f8648f98b-5gg6v\" (UID: \"7009f978-2926-401b-bb27-4378dac2d69a\") " pod="metallb-system/controller-f8648f98b-5gg6v"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.993514 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sxnwn\" (UniqueName: \"kubernetes.io/projected/6d8b765c-bd65-44fb-a959-b458e0c531a4-kube-api-access-sxnwn\") pod \"speaker-snzn5\" (UID: \"6d8b765c-bd65-44fb-a959-b458e0c531a4\") " pod="metallb-system/speaker-snzn5"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.993645 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/6d8b765c-bd65-44fb-a959-b458e0c531a4-metallb-excludel2\") pod \"speaker-snzn5\" (UID: \"6d8b765c-bd65-44fb-a959-b458e0c531a4\") " pod="metallb-system/speaker-snzn5"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.993803 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6d8b765c-bd65-44fb-a959-b458e0c531a4-metrics-certs\") pod \"speaker-snzn5\" (UID: \"6d8b765c-bd65-44fb-a959-b458e0c531a4\") " pod="metallb-system/speaker-snzn5"
Dec 06 05:40:54 crc kubenswrapper[4706]: E1206 05:40:54.994757 4706 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Dec 06 05:40:54 crc kubenswrapper[4706]: E1206 05:40:54.994923 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d8b765c-bd65-44fb-a959-b458e0c531a4-memberlist podName:6d8b765c-bd65-44fb-a959-b458e0c531a4 nodeName:}" failed. No retries permitted until 2025-12-06 05:40:55.494885038 +0000 UTC m=+1277.822708982 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/6d8b765c-bd65-44fb-a959-b458e0c531a4-memberlist") pod "speaker-snzn5" (UID: "6d8b765c-bd65-44fb-a959-b458e0c531a4") : secret "metallb-memberlist" not found
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.995783 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/6d8b765c-bd65-44fb-a959-b458e0c531a4-metallb-excludel2\") pod \"speaker-snzn5\" (UID: \"6d8b765c-bd65-44fb-a959-b458e0c531a4\") " pod="metallb-system/speaker-snzn5"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.997642 4706 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Dec 06 05:40:54 crc kubenswrapper[4706]: I1206 05:40:54.999778 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7009f978-2926-401b-bb27-4378dac2d69a-metrics-certs\") pod \"controller-f8648f98b-5gg6v\" (UID: \"7009f978-2926-401b-bb27-4378dac2d69a\") " pod="metallb-system/controller-f8648f98b-5gg6v"
Dec 06 05:40:55 crc kubenswrapper[4706]: I1206 05:40:55.002631 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6d8b765c-bd65-44fb-a959-b458e0c531a4-metrics-certs\") pod \"speaker-snzn5\" (UID: \"6d8b765c-bd65-44fb-a959-b458e0c531a4\") " pod="metallb-system/speaker-snzn5"
Dec 06 05:40:55 crc kubenswrapper[4706]: I1206 05:40:55.009877 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/7009f978-2926-401b-bb27-4378dac2d69a-cert\") pod \"controller-f8648f98b-5gg6v\" (UID: \"7009f978-2926-401b-bb27-4378dac2d69a\") " pod="metallb-system/controller-f8648f98b-5gg6v"
Dec 06 05:40:55 crc kubenswrapper[4706]: I1206 05:40:55.012661 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktrdp\" (UniqueName: \"kubernetes.io/projected/7009f978-2926-401b-bb27-4378dac2d69a-kube-api-access-ktrdp\") pod \"controller-f8648f98b-5gg6v\" (UID: \"7009f978-2926-401b-bb27-4378dac2d69a\") " pod="metallb-system/controller-f8648f98b-5gg6v"
Dec 06 05:40:55 crc kubenswrapper[4706]: I1206 05:40:55.017466 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sxnwn\" (UniqueName: \"kubernetes.io/projected/6d8b765c-bd65-44fb-a959-b458e0c531a4-kube-api-access-sxnwn\") pod \"speaker-snzn5\" (UID: \"6d8b765c-bd65-44fb-a959-b458e0c531a4\") " pod="metallb-system/speaker-snzn5"
Dec 06 05:40:55 crc kubenswrapper[4706]: I1206 05:40:55.093384 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-f8648f98b-5gg6v"
Dec 06 05:40:55 crc kubenswrapper[4706]: I1206 05:40:55.168610 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7fcb986d4-hwzgm"]
Dec 06 05:40:55 crc kubenswrapper[4706]: I1206 05:40:55.251510 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-hwzgm" event={"ID":"90735168-5b70-4282-9d00-6ca91facf758","Type":"ContainerStarted","Data":"fbef9338a164d25e031cd54ddc4f7d3aa01997b940b1404ac590948607e9c686"}
Dec 06 05:40:55 crc kubenswrapper[4706]: I1206 05:40:55.257097 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-84c87" event={"ID":"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f","Type":"ContainerStarted","Data":"28d2f6b255b064e345d4c7aaefa2700c2030ff69274a6a107fc6f47e5fc2f3eb"}
Dec 06 05:40:55 crc kubenswrapper[4706]: I1206 05:40:55.294351 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-f8648f98b-5gg6v"]
Dec 06 05:40:55 crc kubenswrapper[4706]: W1206 05:40:55.299292 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7009f978_2926_401b_bb27_4378dac2d69a.slice/crio-8963290e14fd0781feca8b80edce36a6af23b9dd381c3243397fdbfbea045575 WatchSource:0}: Error finding container 8963290e14fd0781feca8b80edce36a6af23b9dd381c3243397fdbfbea045575: Status 404 returned error can't find the container with id 8963290e14fd0781feca8b80edce36a6af23b9dd381c3243397fdbfbea045575
Dec 06 05:40:55 crc kubenswrapper[4706]: I1206 05:40:55.499225 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/6d8b765c-bd65-44fb-a959-b458e0c531a4-memberlist\") pod \"speaker-snzn5\" (UID: \"6d8b765c-bd65-44fb-a959-b458e0c531a4\") " pod="metallb-system/speaker-snzn5"
Dec 06 05:40:55 crc kubenswrapper[4706]: I1206 05:40:55.504822 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/6d8b765c-bd65-44fb-a959-b458e0c531a4-memberlist\") pod \"speaker-snzn5\" (UID: \"6d8b765c-bd65-44fb-a959-b458e0c531a4\") " pod="metallb-system/speaker-snzn5"
Dec 06 05:40:55 crc kubenswrapper[4706]: I1206 05:40:55.642332 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-snzn5"
Dec 06 05:40:56 crc kubenswrapper[4706]: I1206 05:40:56.267319 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-snzn5" event={"ID":"6d8b765c-bd65-44fb-a959-b458e0c531a4","Type":"ContainerStarted","Data":"f164238dd7a2a956576fc98201c6d009d0265cec95f4c83e781505b45bfe343b"}
Dec 06 05:40:56 crc kubenswrapper[4706]: I1206 05:40:56.267693 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-snzn5" event={"ID":"6d8b765c-bd65-44fb-a959-b458e0c531a4","Type":"ContainerStarted","Data":"69be12780788bd37df856d53f3e3912fab62590dd2b1359c3a1c04861ff8c10c"}
Dec 06 05:40:56 crc kubenswrapper[4706]: I1206 05:40:56.267708 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-snzn5" event={"ID":"6d8b765c-bd65-44fb-a959-b458e0c531a4","Type":"ContainerStarted","Data":"f5303c6958814375514d65315193434974e14f04fa3756b17a21c3827849fe6f"}
Dec 06 05:40:56 crc kubenswrapper[4706]: I1206 05:40:56.267863 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-snzn5"
Dec 06 05:40:56 crc kubenswrapper[4706]: I1206 05:40:56.269858 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-5gg6v" event={"ID":"7009f978-2926-401b-bb27-4378dac2d69a","Type":"ContainerStarted","Data":"b06f4d18749400bfa7fff3a9f91b204a3cbc7e98dc442b8f9c24ac966213abe8"}
Dec 06 05:40:56 crc kubenswrapper[4706]: I1206 05:40:56.269886 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-5gg6v" event={"ID":"7009f978-2926-401b-bb27-4378dac2d69a","Type":"ContainerStarted","Data":"188d1e6c700b950215b06a767217334fd11a541c07530db82c580f553e74094b"}
Dec 06 05:40:56 crc kubenswrapper[4706]: I1206 05:40:56.269896 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-f8648f98b-5gg6v" event={"ID":"7009f978-2926-401b-bb27-4378dac2d69a","Type":"ContainerStarted","Data":"8963290e14fd0781feca8b80edce36a6af23b9dd381c3243397fdbfbea045575"}
Dec 06 05:40:56 crc kubenswrapper[4706]: I1206 05:40:56.270011 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-f8648f98b-5gg6v"
Dec 06 05:40:56 crc kubenswrapper[4706]: I1206 05:40:56.313590 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-snzn5" podStartSLOduration=2.313567206 podStartE2EDuration="2.313567206s" podCreationTimestamp="2025-12-06 05:40:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:40:56.293434964 +0000 UTC m=+1278.621258908" watchObservedRunningTime="2025-12-06 05:40:56.313567206 +0000 UTC m=+1278.641391160"
Dec 06 05:40:56 crc kubenswrapper[4706]: I1206 05:40:56.314286 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-f8648f98b-5gg6v" podStartSLOduration=2.314279476 podStartE2EDuration="2.314279476s" podCreationTimestamp="2025-12-06 05:40:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:40:56.308327432 +0000 UTC m=+1278.636151376" watchObservedRunningTime="2025-12-06 05:40:56.314279476 +0000 UTC m=+1278.642103420"
Dec 06 05:41:03 crc kubenswrapper[4706]: I1206 05:41:03.330730 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-hwzgm" event={"ID":"90735168-5b70-4282-9d00-6ca91facf758","Type":"ContainerStarted","Data":"6bcb8088661b1611afc2d23558653a4cc35ed61a2b19ae7cc82848e53ce238b1"}
Dec 06 05:41:03 crc kubenswrapper[4706]: I1206 05:41:03.331366 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-hwzgm"
Dec 06 05:41:03 crc kubenswrapper[4706]: I1206 05:41:03.333450 4706 generic.go:334] "Generic (PLEG): container finished" podID="b7f21e0e-b99e-4c3a-9a01-6016f5e3542f" containerID="5350508e4dc8dce42fb4406f9cd552301944477dc1d87c75aa3ff93b6bb8da86" exitCode=0
Dec 06 05:41:03 crc kubenswrapper[4706]: I1206 05:41:03.333525 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-84c87" event={"ID":"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f","Type":"ContainerDied","Data":"5350508e4dc8dce42fb4406f9cd552301944477dc1d87c75aa3ff93b6bb8da86"}
Dec 06 05:41:03 crc kubenswrapper[4706]: I1206 05:41:03.348822 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-hwzgm" podStartSLOduration=2.25867735 podStartE2EDuration="9.348806109s" podCreationTimestamp="2025-12-06 05:40:54 +0000 UTC" firstStartedPulling="2025-12-06 05:40:55.199228201 +0000 UTC m=+1277.527052145" lastFinishedPulling="2025-12-06 05:41:02.28935695 +0000 UTC m=+1284.617180904" observedRunningTime="2025-12-06 05:41:03.346486296 +0000 UTC m=+1285.674310240" watchObservedRunningTime="2025-12-06 05:41:03.348806109 +0000 UTC m=+1285.676630053"
Dec 06 05:41:04 crc kubenswrapper[4706]: I1206 05:41:04.340548 4706 generic.go:334] "Generic (PLEG): container finished" podID="b7f21e0e-b99e-4c3a-9a01-6016f5e3542f" containerID="32366792579f4014ff552aa3aeee584925ef68ad43d86ba9e689a42bf52d097f" exitCode=0
Dec 06 05:41:04 crc kubenswrapper[4706]: I1206 05:41:04.340614 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-84c87" event={"ID":"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f","Type":"ContainerDied","Data":"32366792579f4014ff552aa3aeee584925ef68ad43d86ba9e689a42bf52d097f"}
Dec 06 05:41:05 crc kubenswrapper[4706]: I1206 05:41:05.099343 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-f8648f98b-5gg6v"
Dec 06 05:41:05 crc kubenswrapper[4706]: I1206 05:41:05.348210 4706 generic.go:334] "Generic (PLEG): container finished" podID="b7f21e0e-b99e-4c3a-9a01-6016f5e3542f" containerID="4ab1d0b8056b08f63aef3f2cfa8fdd9f9d86632640aaa0c2f400e3c350f2f6c0" exitCode=0
Dec 06 05:41:05 crc kubenswrapper[4706]: I1206 05:41:05.348250 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-84c87" event={"ID":"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f","Type":"ContainerDied","Data":"4ab1d0b8056b08f63aef3f2cfa8fdd9f9d86632640aaa0c2f400e3c350f2f6c0"}
Dec 06 05:41:05 crc kubenswrapper[4706]: I1206 05:41:05.647778 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-snzn5"
Dec 06 05:41:05 crc kubenswrapper[4706]: I1206 05:41:05.961164 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 06 05:41:05 crc kubenswrapper[4706]: I1206 05:41:05.961744 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 06 05:41:05 crc kubenswrapper[4706]: I1206 05:41:05.961823 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z27rn"
Dec 06 05:41:05 crc kubenswrapper[4706]: I1206 05:41:05.963311 4706 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7cb88f72dc580dec882828d525bf28a4003301f3e0567fd190938d53e4a87ab0"} pod="openshift-machine-config-operator/machine-config-daemon-z27rn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 06 05:41:05 crc kubenswrapper[4706]: I1206 05:41:05.963534 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" containerID="cri-o://7cb88f72dc580dec882828d525bf28a4003301f3e0567fd190938d53e4a87ab0" gracePeriod=600
Dec 06 05:41:06 crc kubenswrapper[4706]: I1206 05:41:06.360027 4706 generic.go:334] "Generic (PLEG): container finished" podID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerID="7cb88f72dc580dec882828d525bf28a4003301f3e0567fd190938d53e4a87ab0" exitCode=0
Dec 06 05:41:06 crc kubenswrapper[4706]: I1206 05:41:06.360137 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerDied","Data":"7cb88f72dc580dec882828d525bf28a4003301f3e0567fd190938d53e4a87ab0"}
Dec 06 05:41:06 crc kubenswrapper[4706]: I1206 05:41:06.360241 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"71fb78259889c3e53f18a29621b104746019c251e6090d6297b3d1c61fdcf223"}
Dec 06 05:41:06 crc kubenswrapper[4706]: I1206 05:41:06.360278 4706 scope.go:117] "RemoveContainer" containerID="5837ae2ad3340b198002bcadcaff039fe17103dc504dd99a597185b1f1d89acf"
Dec 06 05:41:06 crc kubenswrapper[4706]: I1206 05:41:06.368038 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-84c87" event={"ID":"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f","Type":"ContainerStarted","Data":"2181c87983c8ceaf7cd342a42658753376a7618422ad6929d9560f182e635096"}
Dec 06 05:41:06 crc kubenswrapper[4706]: I1206 05:41:06.368116 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-84c87" event={"ID":"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f","Type":"ContainerStarted","Data":"31b0b49ba8733759213324f8b687f4beaf28121e1770476d5be8b38e668bf1cd"}
Dec 06 05:41:06 crc kubenswrapper[4706]: I1206 05:41:06.368130 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-84c87" event={"ID":"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f","Type":"ContainerStarted","Data":"d560c152f7df060fcb6bc9592db7298d6ef7577db3db92ff04003105cb4b2937"}
Dec 06 05:41:06 crc kubenswrapper[4706]: I1206 05:41:06.368139 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-84c87" event={"ID":"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f","Type":"ContainerStarted","Data":"2d9eafbbb2f3a48683f41e324f991a2e8f71bf6f5ae34060ae98207e3c0287a3"}
Dec 06 05:41:06 crc kubenswrapper[4706]: I1206 05:41:06.368148 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-84c87" event={"ID":"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f","Type":"ContainerStarted","Data":"a5bb3af75f28e7839cefe52836642e569c64dcea160b91b89cd27e27f5dc881c"}
Dec 06 05:41:07 crc kubenswrapper[4706]: I1206 05:41:07.382751 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-84c87" event={"ID":"b7f21e0e-b99e-4c3a-9a01-6016f5e3542f","Type":"ContainerStarted","Data":"5c1f273a20da8b8bd07c7aaa9b1f64fd72fce421c2cf59f18daafeff37167a17"}
Dec 06 05:41:07 crc kubenswrapper[4706]: I1206 05:41:07.383269 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-84c87"
Dec 06 05:41:07 crc kubenswrapper[4706]: I1206 05:41:07.464942 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-84c87" podStartSLOduration=6.340324338 podStartE2EDuration="13.464928001s" podCreationTimestamp="2025-12-06 05:40:54 +0000 UTC" firstStartedPulling="2025-12-06 05:40:55.132398909 +0000 UTC m=+1277.460222863" lastFinishedPulling="2025-12-06 05:41:02.257002572 +0000 UTC m=+1284.584826526" observedRunningTime="2025-12-06 05:41:07.46340886 +0000 UTC m=+1289.791232804" watchObservedRunningTime="2025-12-06 05:41:07.464928001 +0000 UTC m=+1289.792751945"
Dec 06 05:41:08 crc kubenswrapper[4706]: I1206 05:41:08.536558 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-8rnvz"]
Dec 06 05:41:08 crc kubenswrapper[4706]: I1206 05:41:08.537715 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-8rnvz"
Dec 06 05:41:08 crc kubenswrapper[4706]: I1206 05:41:08.539825 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Dec 06 05:41:08 crc kubenswrapper[4706]: I1206 05:41:08.540274 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Dec 06 05:41:08 crc kubenswrapper[4706]: I1206 05:41:08.594789 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-rpv5m"
Dec 06 05:41:08 crc kubenswrapper[4706]: I1206 05:41:08.599238 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-8rnvz"]
Dec 06 05:41:08 crc kubenswrapper[4706]: I1206 05:41:08.611104 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9gjx\" (UniqueName: \"kubernetes.io/projected/e58cfb8c-25eb-42c3-8309-8e5e9174e62f-kube-api-access-n9gjx\") pod \"openstack-operator-index-8rnvz\" (UID: \"e58cfb8c-25eb-42c3-8309-8e5e9174e62f\") " pod="openstack-operators/openstack-operator-index-8rnvz"
Dec 06 05:41:08 crc kubenswrapper[4706]: I1206 05:41:08.711821 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9gjx\" (UniqueName: \"kubernetes.io/projected/e58cfb8c-25eb-42c3-8309-8e5e9174e62f-kube-api-access-n9gjx\") pod \"openstack-operator-index-8rnvz\" (UID: \"e58cfb8c-25eb-42c3-8309-8e5e9174e62f\") " pod="openstack-operators/openstack-operator-index-8rnvz"
Dec 06 05:41:08 crc kubenswrapper[4706]: I1206 05:41:08.742813 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9gjx\" (UniqueName: \"kubernetes.io/projected/e58cfb8c-25eb-42c3-8309-8e5e9174e62f-kube-api-access-n9gjx\") pod \"openstack-operator-index-8rnvz\" (UID: \"e58cfb8c-25eb-42c3-8309-8e5e9174e62f\") " pod="openstack-operators/openstack-operator-index-8rnvz"
Dec 06 05:41:08 crc kubenswrapper[4706]: I1206 05:41:08.915519 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-8rnvz"
Dec 06 05:41:09 crc kubenswrapper[4706]: I1206 05:41:09.312873 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-8rnvz"]
Dec 06 05:41:09 crc kubenswrapper[4706]: W1206 05:41:09.323639 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode58cfb8c_25eb_42c3_8309_8e5e9174e62f.slice/crio-8d3f2ca28f1c571b146a25c6f3ba2791d641c65ead2de8ccad46ca5b0f91f753 WatchSource:0}: Error finding container 8d3f2ca28f1c571b146a25c6f3ba2791d641c65ead2de8ccad46ca5b0f91f753: Status 404 returned error can't find the container with id 8d3f2ca28f1c571b146a25c6f3ba2791d641c65ead2de8ccad46ca5b0f91f753
Dec 06 05:41:09 crc kubenswrapper[4706]: I1206 05:41:09.397259 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8rnvz" event={"ID":"e58cfb8c-25eb-42c3-8309-8e5e9174e62f","Type":"ContainerStarted","Data":"8d3f2ca28f1c571b146a25c6f3ba2791d641c65ead2de8ccad46ca5b0f91f753"}
Dec 06 05:41:09 crc kubenswrapper[4706]: I1206 05:41:09.987813 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-84c87"
Dec 06 05:41:10 crc kubenswrapper[4706]: I1206 05:41:10.105496 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-84c87"
Dec 06 05:41:11 crc kubenswrapper[4706]: I1206 05:41:11.515729 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-8rnvz"]
Dec 06 05:41:12 crc kubenswrapper[4706]: I1206 05:41:12.126959 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-8stsm"]
Dec 06 05:41:12 crc kubenswrapper[4706]: I1206 05:41:12.128734 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-8stsm"
Dec 06 05:41:12 crc kubenswrapper[4706]: I1206 05:41:12.136417 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-8stsm"]
Dec 06 05:41:12 crc kubenswrapper[4706]: I1206 05:41:12.267324 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lz8rs\" (UniqueName: \"kubernetes.io/projected/c7986937-a648-4cc0-89ae-e718dcccffad-kube-api-access-lz8rs\") pod \"openstack-operator-index-8stsm\" (UID: \"c7986937-a648-4cc0-89ae-e718dcccffad\") " pod="openstack-operators/openstack-operator-index-8stsm"
Dec 06 05:41:12 crc kubenswrapper[4706]: I1206 05:41:12.368244 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lz8rs\" (UniqueName: \"kubernetes.io/projected/c7986937-a648-4cc0-89ae-e718dcccffad-kube-api-access-lz8rs\") pod \"openstack-operator-index-8stsm\" (UID: \"c7986937-a648-4cc0-89ae-e718dcccffad\") " pod="openstack-operators/openstack-operator-index-8stsm"
Dec 06 05:41:12 crc kubenswrapper[4706]: I1206 05:41:12.401042 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz8rs\" (UniqueName: \"kubernetes.io/projected/c7986937-a648-4cc0-89ae-e718dcccffad-kube-api-access-lz8rs\") pod \"openstack-operator-index-8stsm\" (UID: \"c7986937-a648-4cc0-89ae-e718dcccffad\") " pod="openstack-operators/openstack-operator-index-8stsm"
Dec 06 05:41:12 crc kubenswrapper[4706]: I1206 05:41:12.461818 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-8stsm"
Dec 06 05:41:14 crc kubenswrapper[4706]: I1206 05:41:14.969755 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7fcb986d4-hwzgm"
Dec 06 05:41:16 crc kubenswrapper[4706]: I1206 05:41:16.106443 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-8stsm"]
Dec 06 05:41:16 crc kubenswrapper[4706]: W1206 05:41:16.112263 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc7986937_a648_4cc0_89ae_e718dcccffad.slice/crio-50791071b9488b108e67d6f9f437038eb498ec92f3e0839b80c9bf3eed70cc50 WatchSource:0}: Error finding container 50791071b9488b108e67d6f9f437038eb498ec92f3e0839b80c9bf3eed70cc50: Status 404 returned error can't find the container with id 50791071b9488b108e67d6f9f437038eb498ec92f3e0839b80c9bf3eed70cc50
Dec 06 05:41:16 crc kubenswrapper[4706]: I1206 05:41:16.494027 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8stsm" event={"ID":"c7986937-a648-4cc0-89ae-e718dcccffad","Type":"ContainerStarted","Data":"2b1dec890a4914761140effafb356f0e770d0255070dd695a1adb5174ebb40e8"}
Dec 06 05:41:16 crc kubenswrapper[4706]: I1206 05:41:16.494091 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8stsm" event={"ID":"c7986937-a648-4cc0-89ae-e718dcccffad","Type":"ContainerStarted","Data":"50791071b9488b108e67d6f9f437038eb498ec92f3e0839b80c9bf3eed70cc50"}
Dec 06 05:41:16 crc kubenswrapper[4706]: I1206 05:41:16.495709 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8rnvz" event={"ID":"e58cfb8c-25eb-42c3-8309-8e5e9174e62f","Type":"ContainerStarted","Data":"207667b25fab90afcefc2226934da8d0b05e5b8abd4585023262e02d6db6b6dc"}
Dec 06 05:41:16 crc kubenswrapper[4706]: I1206 05:41:16.495779 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-8rnvz" podUID="e58cfb8c-25eb-42c3-8309-8e5e9174e62f" containerName="registry-server" containerID="cri-o://207667b25fab90afcefc2226934da8d0b05e5b8abd4585023262e02d6db6b6dc" gracePeriod=2
Dec 06 05:41:16 crc kubenswrapper[4706]: I1206 05:41:16.512032 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-8stsm" podStartSLOduration=4.448795876 podStartE2EDuration="4.512015429s" podCreationTimestamp="2025-12-06 05:41:12 +0000 UTC" firstStartedPulling="2025-12-06 05:41:16.11854752 +0000 UTC m=+1298.446371464" lastFinishedPulling="2025-12-06 05:41:16.181767073 +0000 UTC m=+1298.509591017" observedRunningTime="2025-12-06 05:41:16.507512015 +0000 UTC m=+1298.835335959" watchObservedRunningTime="2025-12-06 05:41:16.512015429 +0000 UTC m=+1298.839839373"
Dec 06 05:41:16 crc kubenswrapper[4706]: I1206 05:41:16.525284 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-8rnvz" podStartSLOduration=2.134752787 podStartE2EDuration="8.525269692s" podCreationTimestamp="2025-12-06 05:41:08 +0000 UTC" firstStartedPulling="2025-12-06 05:41:09.325761205 +0000 UTC m=+1291.653585169" lastFinishedPulling="2025-12-06 05:41:15.71627813 +0000 UTC m=+1298.044102074" observedRunningTime="2025-12-06 05:41:16.523478833 +0000 UTC m=+1298.851302777" watchObservedRunningTime="2025-12-06 05:41:16.525269692 +0000 UTC m=+1298.853093636"
Dec 06 05:41:17 crc kubenswrapper[4706]: I1206 05:41:17.502127 4706 generic.go:334] "Generic (PLEG): container finished" podID="e58cfb8c-25eb-42c3-8309-8e5e9174e62f" containerID="207667b25fab90afcefc2226934da8d0b05e5b8abd4585023262e02d6db6b6dc" exitCode=0
Dec 06 05:41:17 crc kubenswrapper[4706]: I1206 05:41:17.502221 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8rnvz" event={"ID":"e58cfb8c-25eb-42c3-8309-8e5e9174e62f","Type":"ContainerDied","Data":"207667b25fab90afcefc2226934da8d0b05e5b8abd4585023262e02d6db6b6dc"}
Dec 06 05:41:18 crc kubenswrapper[4706]: I1206 05:41:18.442822 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-8rnvz"
Dec 06 05:41:18 crc kubenswrapper[4706]: I1206 05:41:18.509680 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-8rnvz" event={"ID":"e58cfb8c-25eb-42c3-8309-8e5e9174e62f","Type":"ContainerDied","Data":"8d3f2ca28f1c571b146a25c6f3ba2791d641c65ead2de8ccad46ca5b0f91f753"}
Dec 06 05:41:18 crc kubenswrapper[4706]: I1206 05:41:18.509729 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-8rnvz"
Dec 06 05:41:18 crc kubenswrapper[4706]: I1206 05:41:18.509757 4706 scope.go:117] "RemoveContainer" containerID="207667b25fab90afcefc2226934da8d0b05e5b8abd4585023262e02d6db6b6dc"
Dec 06 05:41:18 crc kubenswrapper[4706]: I1206 05:41:18.553446 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9gjx\" (UniqueName: \"kubernetes.io/projected/e58cfb8c-25eb-42c3-8309-8e5e9174e62f-kube-api-access-n9gjx\") pod \"e58cfb8c-25eb-42c3-8309-8e5e9174e62f\" (UID: \"e58cfb8c-25eb-42c3-8309-8e5e9174e62f\") "
Dec 06 05:41:18 crc kubenswrapper[4706]: I1206 05:41:18.559460 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e58cfb8c-25eb-42c3-8309-8e5e9174e62f-kube-api-access-n9gjx" (OuterVolumeSpecName: "kube-api-access-n9gjx") pod "e58cfb8c-25eb-42c3-8309-8e5e9174e62f" (UID: "e58cfb8c-25eb-42c3-8309-8e5e9174e62f"). InnerVolumeSpecName "kube-api-access-n9gjx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 06 05:41:18 crc kubenswrapper[4706]: I1206 05:41:18.654285 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9gjx\" (UniqueName: \"kubernetes.io/projected/e58cfb8c-25eb-42c3-8309-8e5e9174e62f-kube-api-access-n9gjx\") on node \"crc\" DevicePath \"\""
Dec 06 05:41:18 crc kubenswrapper[4706]: I1206 05:41:18.838554 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-8rnvz"]
Dec 06 05:41:18 crc kubenswrapper[4706]: I1206 05:41:18.843147 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-8rnvz"]
Dec 06 05:41:20 crc kubenswrapper[4706]: I1206 05:41:20.042116 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e58cfb8c-25eb-42c3-8309-8e5e9174e62f" path="/var/lib/kubelet/pods/e58cfb8c-25eb-42c3-8309-8e5e9174e62f/volumes"
Dec 06 05:41:22 crc kubenswrapper[4706]: I1206 05:41:22.462570 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-8stsm"
Dec 06 05:41:22 crc kubenswrapper[4706]: I1206 05:41:22.462968 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-8stsm"
Dec 06 05:41:22 crc kubenswrapper[4706]: I1206 05:41:22.521903 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-8stsm"
Dec 06 05:41:22 crc kubenswrapper[4706]: I1206 05:41:22.568883 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-8stsm"
Dec 06 05:41:24 crc kubenswrapper[4706]: I1206 05:41:24.988002 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-84c87"
Dec 06 05:41:25 crc kubenswrapper[4706]: I1206 05:41:25.177676 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv"]
Dec 06 05:41:25 crc kubenswrapper[4706]: E1206 05:41:25.177931 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e58cfb8c-25eb-42c3-8309-8e5e9174e62f" containerName="registry-server"
Dec 06 05:41:25 crc kubenswrapper[4706]: I1206 05:41:25.177949 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="e58cfb8c-25eb-42c3-8309-8e5e9174e62f" containerName="registry-server"
Dec 06 05:41:25 crc kubenswrapper[4706]: I1206 05:41:25.178077 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="e58cfb8c-25eb-42c3-8309-8e5e9174e62f" containerName="registry-server"
Dec 06 05:41:25 crc kubenswrapper[4706]: I1206 05:41:25.178876 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv"
Dec 06 05:41:25 crc kubenswrapper[4706]: I1206 05:41:25.184008 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-6qf5l"
Dec 06 05:41:25 crc kubenswrapper[4706]: I1206 05:41:25.199270 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv"]
Dec 06 05:41:25 crc kubenswrapper[4706]: I1206 05:41:25.336696 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-grsvv\" (UniqueName: \"kubernetes.io/projected/2add93a7-b496-4008-b764-b43a05be4967-kube-api-access-grsvv\") pod \"01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv\" (UID: \"2add93a7-b496-4008-b764-b43a05be4967\") " pod="openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv"
Dec 06 05:41:25 crc kubenswrapper[4706]: I1206 05:41:25.336778 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2add93a7-b496-4008-b764-b43a05be4967-bundle\") pod \"01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv\" (UID: \"2add93a7-b496-4008-b764-b43a05be4967\") " pod="openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv"
Dec 06 05:41:25 crc kubenswrapper[4706]: I1206 05:41:25.336844 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2add93a7-b496-4008-b764-b43a05be4967-util\") pod \"01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv\" (UID: \"2add93a7-b496-4008-b764-b43a05be4967\") " pod="openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv"
Dec 06 05:41:25 crc kubenswrapper[4706]: I1206 05:41:25.438234 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-grsvv\" (UniqueName: \"kubernetes.io/projected/2add93a7-b496-4008-b764-b43a05be4967-kube-api-access-grsvv\") pod \"01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv\" (UID: \"2add93a7-b496-4008-b764-b43a05be4967\") " pod="openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv"
Dec 06 05:41:25 crc kubenswrapper[4706]: I1206 05:41:25.438309 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2add93a7-b496-4008-b764-b43a05be4967-bundle\") pod \"01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv\" (UID: \"2add93a7-b496-4008-b764-b43a05be4967\") " pod="openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv"
Dec 06 05:41:25 crc kubenswrapper[4706]: I1206 05:41:25.438352 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2add93a7-b496-4008-b764-b43a05be4967-util\") pod \"01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv\" (UID: \"2add93a7-b496-4008-b764-b43a05be4967\") " pod="openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv"
Dec 06 05:41:25 crc kubenswrapper[4706]: I1206 05:41:25.438896 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2add93a7-b496-4008-b764-b43a05be4967-util\") pod \"01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv\" (UID: \"2add93a7-b496-4008-b764-b43a05be4967\") " pod="openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv"
Dec 06 05:41:25 crc kubenswrapper[4706]: I1206 05:41:25.438935 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2add93a7-b496-4008-b764-b43a05be4967-bundle\") pod \"01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv\" (UID: \"2add93a7-b496-4008-b764-b43a05be4967\") " pod="openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv"
Dec 06 05:41:25 crc kubenswrapper[4706]: I1206 05:41:25.463096 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-grsvv\" (UniqueName: \"kubernetes.io/projected/2add93a7-b496-4008-b764-b43a05be4967-kube-api-access-grsvv\") pod \"01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv\" (UID: \"2add93a7-b496-4008-b764-b43a05be4967\") " pod="openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv"
Dec 06 05:41:25 crc kubenswrapper[4706]: I1206 05:41:25.497966 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv"
Dec 06 05:41:25 crc kubenswrapper[4706]: I1206 05:41:25.713278 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv"]
Dec 06 05:41:26 crc kubenswrapper[4706]: I1206 05:41:26.560380 4706 generic.go:334] "Generic (PLEG): container finished" podID="2add93a7-b496-4008-b764-b43a05be4967" containerID="eef66f27121fb566c2d1fcf45e5109f57e1b17acd8d9496f75bf6a6e59fe0ab0" exitCode=0
Dec 06 05:41:26 crc kubenswrapper[4706]: I1206 05:41:26.560472 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv" event={"ID":"2add93a7-b496-4008-b764-b43a05be4967","Type":"ContainerDied","Data":"eef66f27121fb566c2d1fcf45e5109f57e1b17acd8d9496f75bf6a6e59fe0ab0"}
Dec 06 05:41:26 crc kubenswrapper[4706]: I1206 05:41:26.560686 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv" event={"ID":"2add93a7-b496-4008-b764-b43a05be4967","Type":"ContainerStarted","Data":"73b093c8a6b7c128dd123638f12eadfa6b11e55e4abef1411ecf99bf8b35dd51"}
Dec 06 05:41:27 crc kubenswrapper[4706]: I1206 05:41:27.569120 4706 generic.go:334] "Generic (PLEG): container finished" podID="2add93a7-b496-4008-b764-b43a05be4967" containerID="9455e5dceb1232bc69f1b2597382116eb8c379ca271649305e0e7cf277690415" exitCode=0
Dec 06 05:41:27 crc kubenswrapper[4706]: I1206 05:41:27.569166 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv" event={"ID":"2add93a7-b496-4008-b764-b43a05be4967","Type":"ContainerDied","Data":"9455e5dceb1232bc69f1b2597382116eb8c379ca271649305e0e7cf277690415"}
Dec 06 05:41:28 crc kubenswrapper[4706]: I1206 05:41:28.578177 4706 generic.go:334] "Generic (PLEG): container finished" podID="2add93a7-b496-4008-b764-b43a05be4967" containerID="440666fd7a256b3c3315b693162ca8ec6c0aa6cde922774f51f737ec62d92d1c" exitCode=0
Dec 06 05:41:28 crc kubenswrapper[4706]: I1206 05:41:28.578256 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv" event={"ID":"2add93a7-b496-4008-b764-b43a05be4967","Type":"ContainerDied","Data":"440666fd7a256b3c3315b693162ca8ec6c0aa6cde922774f51f737ec62d92d1c"}
Dec 06 05:41:29 crc kubenswrapper[4706]: I1206 05:41:29.863016 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv"
Dec 06 05:41:29 crc kubenswrapper[4706]: I1206 05:41:29.901747 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2add93a7-b496-4008-b764-b43a05be4967-util\") pod \"2add93a7-b496-4008-b764-b43a05be4967\" (UID: \"2add93a7-b496-4008-b764-b43a05be4967\") "
Dec 06 05:41:29 crc kubenswrapper[4706]: I1206 05:41:29.901819 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2add93a7-b496-4008-b764-b43a05be4967-bundle\") pod \"2add93a7-b496-4008-b764-b43a05be4967\" (UID: \"2add93a7-b496-4008-b764-b43a05be4967\") "
Dec 06 05:41:29 crc kubenswrapper[4706]: I1206 05:41:29.901854 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-grsvv\" (UniqueName: \"kubernetes.io/projected/2add93a7-b496-4008-b764-b43a05be4967-kube-api-access-grsvv\") pod \"2add93a7-b496-4008-b764-b43a05be4967\" (UID: \"2add93a7-b496-4008-b764-b43a05be4967\") "
Dec 06 05:41:29 crc kubenswrapper[4706]: I1206 05:41:29.902598 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2add93a7-b496-4008-b764-b43a05be4967-bundle" (OuterVolumeSpecName: "bundle") pod "2add93a7-b496-4008-b764-b43a05be4967" (UID: "2add93a7-b496-4008-b764-b43a05be4967"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 06 05:41:29 crc kubenswrapper[4706]: I1206 05:41:29.911344 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2add93a7-b496-4008-b764-b43a05be4967-kube-api-access-grsvv" (OuterVolumeSpecName: "kube-api-access-grsvv") pod "2add93a7-b496-4008-b764-b43a05be4967" (UID: "2add93a7-b496-4008-b764-b43a05be4967"). InnerVolumeSpecName "kube-api-access-grsvv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 06 05:41:29 crc kubenswrapper[4706]: I1206 05:41:29.917552 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2add93a7-b496-4008-b764-b43a05be4967-util" (OuterVolumeSpecName: "util") pod "2add93a7-b496-4008-b764-b43a05be4967" (UID: "2add93a7-b496-4008-b764-b43a05be4967"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 06 05:41:30 crc kubenswrapper[4706]: I1206 05:41:30.002462 4706 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/2add93a7-b496-4008-b764-b43a05be4967-bundle\") on node \"crc\" DevicePath \"\""
Dec 06 05:41:30 crc kubenswrapper[4706]: I1206 05:41:30.002494 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-grsvv\" (UniqueName: \"kubernetes.io/projected/2add93a7-b496-4008-b764-b43a05be4967-kube-api-access-grsvv\") on node \"crc\" DevicePath \"\""
Dec 06 05:41:30 crc kubenswrapper[4706]: I1206 05:41:30.002508 4706 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/2add93a7-b496-4008-b764-b43a05be4967-util\") on node \"crc\" DevicePath \"\""
Dec 06 05:41:30 crc kubenswrapper[4706]: I1206 05:41:30.595400 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv" event={"ID":"2add93a7-b496-4008-b764-b43a05be4967","Type":"ContainerDied","Data":"73b093c8a6b7c128dd123638f12eadfa6b11e55e4abef1411ecf99bf8b35dd51"}
Dec 06 05:41:30 crc kubenswrapper[4706]: I1206 05:41:30.595789 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="73b093c8a6b7c128dd123638f12eadfa6b11e55e4abef1411ecf99bf8b35dd51"
Dec 06 05:41:30 crc kubenswrapper[4706]: I1206 05:41:30.595483 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv"
Dec 06 05:41:31 crc kubenswrapper[4706]: I1206 05:41:31.865445 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5964599cfc-xxv5r"]
Dec 06 05:41:31 crc kubenswrapper[4706]: E1206 05:41:31.865714 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2add93a7-b496-4008-b764-b43a05be4967" containerName="util"
Dec 06 05:41:31 crc kubenswrapper[4706]: I1206 05:41:31.865729 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="2add93a7-b496-4008-b764-b43a05be4967" containerName="util"
Dec 06 05:41:31 crc kubenswrapper[4706]: E1206 05:41:31.865743 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2add93a7-b496-4008-b764-b43a05be4967" containerName="pull"
Dec 06 05:41:31 crc kubenswrapper[4706]: I1206 05:41:31.865751 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="2add93a7-b496-4008-b764-b43a05be4967" containerName="pull"
Dec 06 05:41:31 crc kubenswrapper[4706]: E1206 05:41:31.865766 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2add93a7-b496-4008-b764-b43a05be4967" containerName="extract"
Dec 06 05:41:31 crc kubenswrapper[4706]: I1206 05:41:31.865774 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="2add93a7-b496-4008-b764-b43a05be4967" containerName="extract"
Dec 06 05:41:31 crc kubenswrapper[4706]: I1206 05:41:31.865899 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="2add93a7-b496-4008-b764-b43a05be4967" containerName="extract"
Dec 06 05:41:31 crc kubenswrapper[4706]: I1206 05:41:31.866399 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5964599cfc-xxv5r"
Dec 06 05:41:31 crc kubenswrapper[4706]: I1206 05:41:31.869338 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-42txg"
Dec 06 05:41:31 crc kubenswrapper[4706]: I1206 05:41:31.889118 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5964599cfc-xxv5r"]
Dec 06 05:41:31 crc kubenswrapper[4706]: I1206 05:41:31.930947 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntps9\" (UniqueName: \"kubernetes.io/projected/235972bf-6d17-4167-b41f-98483ea3f1ba-kube-api-access-ntps9\") pod \"openstack-operator-controller-operator-5964599cfc-xxv5r\" (UID: \"235972bf-6d17-4167-b41f-98483ea3f1ba\") " pod="openstack-operators/openstack-operator-controller-operator-5964599cfc-xxv5r"
Dec 06 05:41:32 crc kubenswrapper[4706]: I1206 05:41:32.031939 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntps9\" (UniqueName: \"kubernetes.io/projected/235972bf-6d17-4167-b41f-98483ea3f1ba-kube-api-access-ntps9\") pod \"openstack-operator-controller-operator-5964599cfc-xxv5r\" (UID: \"235972bf-6d17-4167-b41f-98483ea3f1ba\") " pod="openstack-operators/openstack-operator-controller-operator-5964599cfc-xxv5r"
Dec 06 05:41:32 crc kubenswrapper[4706]: I1206 05:41:32.061241 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntps9\" (UniqueName: \"kubernetes.io/projected/235972bf-6d17-4167-b41f-98483ea3f1ba-kube-api-access-ntps9\") pod \"openstack-operator-controller-operator-5964599cfc-xxv5r\" (UID: \"235972bf-6d17-4167-b41f-98483ea3f1ba\") " pod="openstack-operators/openstack-operator-controller-operator-5964599cfc-xxv5r"
Dec 06 05:41:32 crc kubenswrapper[4706]: I1206 05:41:32.183753 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5964599cfc-xxv5r" Dec 06 05:41:32 crc kubenswrapper[4706]: I1206 05:41:32.438576 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5964599cfc-xxv5r"] Dec 06 05:41:32 crc kubenswrapper[4706]: I1206 05:41:32.605770 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5964599cfc-xxv5r" event={"ID":"235972bf-6d17-4167-b41f-98483ea3f1ba","Type":"ContainerStarted","Data":"8e5d7122d6de35169a08ddfc241fde80e297d259a1ea4b4046eebbebf50e8724"} Dec 06 05:41:37 crc kubenswrapper[4706]: I1206 05:41:37.635405 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5964599cfc-xxv5r" event={"ID":"235972bf-6d17-4167-b41f-98483ea3f1ba","Type":"ContainerStarted","Data":"f1ff619ec9354996f9bad66602c1932b99b385ab4f480cc097f9b899b9520db8"} Dec 06 05:41:39 crc kubenswrapper[4706]: I1206 05:41:39.659041 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-5964599cfc-xxv5r" Dec 06 05:41:39 crc kubenswrapper[4706]: I1206 05:41:39.691162 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-5964599cfc-xxv5r" podStartSLOduration=4.207970093 podStartE2EDuration="8.691144709s" podCreationTimestamp="2025-12-06 05:41:31 +0000 UTC" firstStartedPulling="2025-12-06 05:41:32.453515938 +0000 UTC m=+1314.781339882" lastFinishedPulling="2025-12-06 05:41:36.936690554 +0000 UTC m=+1319.264514498" observedRunningTime="2025-12-06 05:41:39.687097349 +0000 UTC m=+1322.014921313" watchObservedRunningTime="2025-12-06 05:41:39.691144709 +0000 UTC m=+1322.018968653" Dec 06 05:41:42 crc kubenswrapper[4706]: I1206 05:41:42.187116 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-5964599cfc-xxv5r" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.379679 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-859b6ccc6-msm2n"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.381349 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-msm2n" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.383682 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-jvqnk" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.394069 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-mpkjv"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.395018 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-mpkjv" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.397542 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-r5snc" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.406672 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-mpkjv"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.414104 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-859b6ccc6-msm2n"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.420001 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-78b4bc895b-wzlpz"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.421263 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-wzlpz" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.426091 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-vphtz" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.436113 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-qr75r"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.437032 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-qr75r" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.442373 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-wp9f4" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.450966 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-78b4bc895b-wzlpz"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.462614 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k789j\" (UniqueName: \"kubernetes.io/projected/31b78248-5727-4a30-95ab-d75acc5a752b-kube-api-access-k789j\") pod \"barbican-operator-controller-manager-7d9dfd778-mpkjv\" (UID: \"31b78248-5727-4a30-95ab-d75acc5a752b\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-mpkjv" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.468664 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-77987cd8cd-vm2sj"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.469861 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-77987cd8cd-vm2sj" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.474350 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-rv96p" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.492142 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-fcp7z"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.493458 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-fcp7z" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.499614 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-th7c2" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.501989 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-77987cd8cd-vm2sj"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.513716 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-qr75r"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.539770 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-fcp7z"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.548136 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.549250 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.550024 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-6c548fd776-jspvh"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.550946 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-jspvh" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.551828 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.552106 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-6jqvt" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.563658 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-jvwv2"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.564581 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-jvwv2" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.565035 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-4dfrw" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.565442 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmdz5\" (UniqueName: \"kubernetes.io/projected/9e547dc3-41db-48ab-b791-885c0f98f4c8-kube-api-access-vmdz5\") pod \"cinder-operator-controller-manager-859b6ccc6-msm2n\" (UID: \"9e547dc3-41db-48ab-b791-885c0f98f4c8\") " pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-msm2n" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.565503 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmfwg\" (UniqueName: \"kubernetes.io/projected/74049eb3-6721-4234-80cd-01b530d2d9e5-kube-api-access-bmfwg\") pod \"designate-operator-controller-manager-78b4bc895b-wzlpz\" (UID: \"74049eb3-6721-4234-80cd-01b530d2d9e5\") " pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-wzlpz" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.565539 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkmcc\" (UniqueName: \"kubernetes.io/projected/646d8bbb-f505-42f9-a23d-15b999c5acce-kube-api-access-jkmcc\") pod \"heat-operator-controller-manager-5f64f6f8bb-qr75r\" (UID: \"646d8bbb-f505-42f9-a23d-15b999c5acce\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-qr75r" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.565574 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k789j\" (UniqueName: \"kubernetes.io/projected/31b78248-5727-4a30-95ab-d75acc5a752b-kube-api-access-k789j\") pod \"barbican-operator-controller-manager-7d9dfd778-mpkjv\" (UID: \"31b78248-5727-4a30-95ab-d75acc5a752b\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-mpkjv" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.571604 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-qlpk9" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.587775 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.596965 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-jvwv2"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.607899 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k789j\" (UniqueName: \"kubernetes.io/projected/31b78248-5727-4a30-95ab-d75acc5a752b-kube-api-access-k789j\") pod \"barbican-operator-controller-manager-7d9dfd778-mpkjv\" (UID: \"31b78248-5727-4a30-95ab-d75acc5a752b\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-mpkjv" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.627105 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-6c548fd776-jspvh"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.633910 4706 kubelet.go:2421] "SyncLoop 
ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-bkvhv"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.635009 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-bkvhv" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.638064 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-nhzq9"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.638825 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-nhzq9" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.646153 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-7c79b5df47-xctf2"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.647152 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7c79b5df47-xctf2" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.649623 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-q5nhc" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.660427 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-gthb5" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.660522 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-kkrh4" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.660715 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-qfpfj"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.673063 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-qfpfj" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.683824 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9jgf\" (UniqueName: \"kubernetes.io/projected/b6524ab6-7d15-4cf4-b3b2-dc9f0d014930-kube-api-access-k9jgf\") pod \"horizon-operator-controller-manager-68c6d99b8f-fcp7z\" (UID: \"b6524ab6-7d15-4cf4-b3b2-dc9f0d014930\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-fcp7z" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.683936 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmfwg\" (UniqueName: \"kubernetes.io/projected/74049eb3-6721-4234-80cd-01b530d2d9e5-kube-api-access-bmfwg\") pod \"designate-operator-controller-manager-78b4bc895b-wzlpz\" (UID: \"74049eb3-6721-4234-80cd-01b530d2d9e5\") " pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-wzlpz" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.683968 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0e17be2a-d936-4d91-862a-b92014212bf6-cert\") pod \"infra-operator-controller-manager-57548d458d-x7wwl\" (UID: \"0e17be2a-d936-4d91-862a-b92014212bf6\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.683987 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szbdq\" (UniqueName: \"kubernetes.io/projected/de139c22-08fa-4b45-abda-af9394c16eac-kube-api-access-szbdq\") pod \"glance-operator-controller-manager-77987cd8cd-vm2sj\" (UID: \"de139c22-08fa-4b45-abda-af9394c16eac\") " pod="openstack-operators/glance-operator-controller-manager-77987cd8cd-vm2sj" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.684055 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkmcc\" (UniqueName: \"kubernetes.io/projected/646d8bbb-f505-42f9-a23d-15b999c5acce-kube-api-access-jkmcc\") pod \"heat-operator-controller-manager-5f64f6f8bb-qr75r\" (UID: \"646d8bbb-f505-42f9-a23d-15b999c5acce\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-qr75r" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.684093 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pcmrr\" (UniqueName: \"kubernetes.io/projected/eacc98a4-22bf-4a38-8de0-2bf6fd395572-kube-api-access-pcmrr\") pod \"ironic-operator-controller-manager-6c548fd776-jspvh\" (UID: \"eacc98a4-22bf-4a38-8de0-2bf6fd395572\") " pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-jspvh" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.684150 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxh7b\" (UniqueName: \"kubernetes.io/projected/34163fc1-16c7-4942-9eda-5afb77180d00-kube-api-access-nxh7b\") pod \"keystone-operator-controller-manager-7765d96ddf-jvwv2\" (UID: \"34163fc1-16c7-4942-9eda-5afb77180d00\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-jvwv2" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.684210 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmdz5\" 
(UniqueName: \"kubernetes.io/projected/9e547dc3-41db-48ab-b791-885c0f98f4c8-kube-api-access-vmdz5\") pod \"cinder-operator-controller-manager-859b6ccc6-msm2n\" (UID: \"9e547dc3-41db-48ab-b791-885c0f98f4c8\") " pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-msm2n" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.684234 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48trg\" (UniqueName: \"kubernetes.io/projected/0e17be2a-d936-4d91-862a-b92014212bf6-kube-api-access-48trg\") pod \"infra-operator-controller-manager-57548d458d-x7wwl\" (UID: \"0e17be2a-d936-4d91-862a-b92014212bf6\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.698886 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-86rsf" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.714228 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-bkvhv"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.714444 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-mpkjv" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.727380 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-nhzq9"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.733147 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7c79b5df47-xctf2"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.745786 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-k5hqn"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.747041 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-k5hqn" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.750671 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmfwg\" (UniqueName: \"kubernetes.io/projected/74049eb3-6721-4234-80cd-01b530d2d9e5-kube-api-access-bmfwg\") pod \"designate-operator-controller-manager-78b4bc895b-wzlpz\" (UID: \"74049eb3-6721-4234-80cd-01b530d2d9e5\") " pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-wzlpz" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.755693 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-l292b" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.757104 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkmcc\" (UniqueName: \"kubernetes.io/projected/646d8bbb-f505-42f9-a23d-15b999c5acce-kube-api-access-jkmcc\") pod \"heat-operator-controller-manager-5f64f6f8bb-qr75r\" (UID: \"646d8bbb-f505-42f9-a23d-15b999c5acce\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-qr75r" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.763481 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-qr75r" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.763862 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmdz5\" (UniqueName: \"kubernetes.io/projected/9e547dc3-41db-48ab-b791-885c0f98f4c8-kube-api-access-vmdz5\") pod \"cinder-operator-controller-manager-859b6ccc6-msm2n\" (UID: \"9e547dc3-41db-48ab-b791-885c0f98f4c8\") " pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-msm2n" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.788060 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mz99\" (UniqueName: \"kubernetes.io/projected/5f25d928-9f7a-4d1b-b1bb-abc58dad2080-kube-api-access-2mz99\") pod \"mariadb-operator-controller-manager-56bbcc9d85-nhzq9\" (UID: \"5f25d928-9f7a-4d1b-b1bb-abc58dad2080\") " pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-nhzq9" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.788121 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pcmrr\" (UniqueName: \"kubernetes.io/projected/eacc98a4-22bf-4a38-8de0-2bf6fd395572-kube-api-access-pcmrr\") pod \"ironic-operator-controller-manager-6c548fd776-jspvh\" (UID: \"eacc98a4-22bf-4a38-8de0-2bf6fd395572\") " pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-jspvh" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.788151 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxh7b\" (UniqueName: \"kubernetes.io/projected/34163fc1-16c7-4942-9eda-5afb77180d00-kube-api-access-nxh7b\") pod \"keystone-operator-controller-manager-7765d96ddf-jvwv2\" (UID: \"34163fc1-16c7-4942-9eda-5afb77180d00\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-jvwv2" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.788183 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48trg\" (UniqueName: \"kubernetes.io/projected/0e17be2a-d936-4d91-862a-b92014212bf6-kube-api-access-48trg\") pod \"infra-operator-controller-manager-57548d458d-x7wwl\" (UID: \"0e17be2a-d936-4d91-862a-b92014212bf6\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.788204 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdkmt\" (UniqueName: \"kubernetes.io/projected/0928e1f4-7912-465f-a991-9d0dda0a42d1-kube-api-access-pdkmt\") pod \"nova-operator-controller-manager-697bc559fc-qfpfj\" (UID: \"0928e1f4-7912-465f-a991-9d0dda0a42d1\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-qfpfj" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.788221 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sj52f\" (UniqueName: \"kubernetes.io/projected/b67589f2-8ee8-43a3-aaf9-e1767c0a75c5-kube-api-access-sj52f\") pod \"manila-operator-controller-manager-7c79b5df47-xctf2\" (UID: \"b67589f2-8ee8-43a3-aaf9-e1767c0a75c5\") " pod="openstack-operators/manila-operator-controller-manager-7c79b5df47-xctf2" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.788241 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9jgf\" (UniqueName: 
\"kubernetes.io/projected/b6524ab6-7d15-4cf4-b3b2-dc9f0d014930-kube-api-access-k9jgf\") pod \"horizon-operator-controller-manager-68c6d99b8f-fcp7z\" (UID: \"b6524ab6-7d15-4cf4-b3b2-dc9f0d014930\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-fcp7z" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.788267 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58wvq\" (UniqueName: \"kubernetes.io/projected/d28af7d8-b64b-48f1-9ac1-7f1cfc361751-kube-api-access-58wvq\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-bkvhv\" (UID: \"d28af7d8-b64b-48f1-9ac1-7f1cfc361751\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-bkvhv" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.788292 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqwsm\" (UniqueName: \"kubernetes.io/projected/b980759b-88cf-47ee-b7b0-12ebaddba6cd-kube-api-access-mqwsm\") pod \"octavia-operator-controller-manager-998648c74-k5hqn\" (UID: \"b980759b-88cf-47ee-b7b0-12ebaddba6cd\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-k5hqn" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.788310 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0e17be2a-d936-4d91-862a-b92014212bf6-cert\") pod \"infra-operator-controller-manager-57548d458d-x7wwl\" (UID: \"0e17be2a-d936-4d91-862a-b92014212bf6\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.788333 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szbdq\" (UniqueName: \"kubernetes.io/projected/de139c22-08fa-4b45-abda-af9394c16eac-kube-api-access-szbdq\") pod \"glance-operator-controller-manager-77987cd8cd-vm2sj\" (UID: \"de139c22-08fa-4b45-abda-af9394c16eac\") " pod="openstack-operators/glance-operator-controller-manager-77987cd8cd-vm2sj" Dec 06 05:42:00 crc kubenswrapper[4706]: E1206 05:42:00.789098 4706 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 06 05:42:00 crc kubenswrapper[4706]: E1206 05:42:00.789143 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0e17be2a-d936-4d91-862a-b92014212bf6-cert podName:0e17be2a-d936-4d91-862a-b92014212bf6 nodeName:}" failed. No retries permitted until 2025-12-06 05:42:01.289127876 +0000 UTC m=+1343.616951820 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0e17be2a-d936-4d91-862a-b92014212bf6-cert") pod "infra-operator-controller-manager-57548d458d-x7wwl" (UID: "0e17be2a-d936-4d91-862a-b92014212bf6") : secret "infra-operator-webhook-server-cert" not found Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.810826 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-qfpfj"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.837131 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.838398 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-k5hqn"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.838493 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.847275 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.847581 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-dnbxq" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.859652 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9jgf\" (UniqueName: \"kubernetes.io/projected/b6524ab6-7d15-4cf4-b3b2-dc9f0d014930-kube-api-access-k9jgf\") pod \"horizon-operator-controller-manager-68c6d99b8f-fcp7z\" (UID: \"b6524ab6-7d15-4cf4-b3b2-dc9f0d014930\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-fcp7z" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.870979 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxh7b\" (UniqueName: \"kubernetes.io/projected/34163fc1-16c7-4942-9eda-5afb77180d00-kube-api-access-nxh7b\") pod \"keystone-operator-controller-manager-7765d96ddf-jvwv2\" (UID: \"34163fc1-16c7-4942-9eda-5afb77180d00\") " pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-jvwv2" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.874615 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pcmrr\" (UniqueName: \"kubernetes.io/projected/eacc98a4-22bf-4a38-8de0-2bf6fd395572-kube-api-access-pcmrr\") pod \"ironic-operator-controller-manager-6c548fd776-jspvh\" (UID: \"eacc98a4-22bf-4a38-8de0-2bf6fd395572\") " pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-jspvh" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.880744 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szbdq\" (UniqueName: \"kubernetes.io/projected/de139c22-08fa-4b45-abda-af9394c16eac-kube-api-access-szbdq\") pod \"glance-operator-controller-manager-77987cd8cd-vm2sj\" (UID: \"de139c22-08fa-4b45-abda-af9394c16eac\") " pod="openstack-operators/glance-operator-controller-manager-77987cd8cd-vm2sj" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.889305 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48trg\" (UniqueName: 
\"kubernetes.io/projected/0e17be2a-d936-4d91-862a-b92014212bf6-kube-api-access-48trg\") pod \"infra-operator-controller-manager-57548d458d-x7wwl\" (UID: \"0e17be2a-d936-4d91-862a-b92014212bf6\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.890798 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdkmt\" (UniqueName: \"kubernetes.io/projected/0928e1f4-7912-465f-a991-9d0dda0a42d1-kube-api-access-pdkmt\") pod \"nova-operator-controller-manager-697bc559fc-qfpfj\" (UID: \"0928e1f4-7912-465f-a991-9d0dda0a42d1\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-qfpfj" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.890828 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sj52f\" (UniqueName: \"kubernetes.io/projected/b67589f2-8ee8-43a3-aaf9-e1767c0a75c5-kube-api-access-sj52f\") pod \"manila-operator-controller-manager-7c79b5df47-xctf2\" (UID: \"b67589f2-8ee8-43a3-aaf9-e1767c0a75c5\") " pod="openstack-operators/manila-operator-controller-manager-7c79b5df47-xctf2" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.890860 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58wvq\" (UniqueName: \"kubernetes.io/projected/d28af7d8-b64b-48f1-9ac1-7f1cfc361751-kube-api-access-58wvq\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-bkvhv\" (UID: \"d28af7d8-b64b-48f1-9ac1-7f1cfc361751\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-bkvhv" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.890890 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqwsm\" (UniqueName: \"kubernetes.io/projected/b980759b-88cf-47ee-b7b0-12ebaddba6cd-kube-api-access-mqwsm\") pod \"octavia-operator-controller-manager-998648c74-k5hqn\" (UID: \"b980759b-88cf-47ee-b7b0-12ebaddba6cd\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-k5hqn" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.890923 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mz99\" (UniqueName: \"kubernetes.io/projected/5f25d928-9f7a-4d1b-b1bb-abc58dad2080-kube-api-access-2mz99\") pod \"mariadb-operator-controller-manager-56bbcc9d85-nhzq9\" (UID: \"5f25d928-9f7a-4d1b-b1bb-abc58dad2080\") " pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-nhzq9" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.895821 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-jspvh" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.896901 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-q9dk8"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.898560 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-q9dk8" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.902856 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-jvwv2" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.915169 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-ndwvh" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.920101 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-gz7v6"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.921249 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-gz7v6" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.923940 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-2fjkc" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.932483 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mz99\" (UniqueName: \"kubernetes.io/projected/5f25d928-9f7a-4d1b-b1bb-abc58dad2080-kube-api-access-2mz99\") pod \"mariadb-operator-controller-manager-56bbcc9d85-nhzq9\" (UID: \"5f25d928-9f7a-4d1b-b1bb-abc58dad2080\") " pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-nhzq9" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.940788 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.941582 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdkmt\" (UniqueName: \"kubernetes.io/projected/0928e1f4-7912-465f-a991-9d0dda0a42d1-kube-api-access-pdkmt\") pod \"nova-operator-controller-manager-697bc559fc-qfpfj\" (UID: \"0928e1f4-7912-465f-a991-9d0dda0a42d1\") " pod="openstack-operators/nova-operator-controller-manager-697bc559fc-qfpfj" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.954249 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58wvq\" (UniqueName: \"kubernetes.io/projected/d28af7d8-b64b-48f1-9ac1-7f1cfc361751-kube-api-access-58wvq\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-bkvhv\" (UID: \"d28af7d8-b64b-48f1-9ac1-7f1cfc361751\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-bkvhv" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.959085 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqwsm\" (UniqueName: \"kubernetes.io/projected/b980759b-88cf-47ee-b7b0-12ebaddba6cd-kube-api-access-mqwsm\") pod \"octavia-operator-controller-manager-998648c74-k5hqn\" (UID: \"b980759b-88cf-47ee-b7b0-12ebaddba6cd\") " pod="openstack-operators/octavia-operator-controller-manager-998648c74-k5hqn" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.963314 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-q9dk8"] Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.962231 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sj52f\" (UniqueName: \"kubernetes.io/projected/b67589f2-8ee8-43a3-aaf9-e1767c0a75c5-kube-api-access-sj52f\") pod \"manila-operator-controller-manager-7c79b5df47-xctf2\" (UID: \"b67589f2-8ee8-43a3-aaf9-e1767c0a75c5\") " 
pod="openstack-operators/manila-operator-controller-manager-7c79b5df47-xctf2" Dec 06 05:42:00 crc kubenswrapper[4706]: I1206 05:42:00.990748 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-gz7v6"] Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:00.991595 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09479c44-e706-4f72-a1f3-6b71d4b29f0b-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd455bk5\" (UID: \"09479c44-e706-4f72-a1f3-6b71d4b29f0b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:00.991676 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wdr4\" (UniqueName: \"kubernetes.io/projected/09479c44-e706-4f72-a1f3-6b71d4b29f0b-kube-api-access-4wdr4\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd455bk5\" (UID: \"09479c44-e706-4f72-a1f3-6b71d4b29f0b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.000905 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-msm2n" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.005125 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f8c65bbfc-jc6r8"] Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.006188 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-jc6r8" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.008682 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-979mz" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.018304 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-bkvhv" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.039376 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-wzlpz" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.040680 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f8c65bbfc-jc6r8"] Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.077116 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-lpjp5"] Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.078428 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-lpjp5" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.086797 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-cmctp" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.087445 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-77987cd8cd-vm2sj" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.099852 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wdr4\" (UniqueName: \"kubernetes.io/projected/09479c44-e706-4f72-a1f3-6b71d4b29f0b-kube-api-access-4wdr4\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd455bk5\" (UID: \"09479c44-e706-4f72-a1f3-6b71d4b29f0b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.099888 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sw4zp\" (UniqueName: \"kubernetes.io/projected/73d3329e-7a93-4d32-b7ba-0d5d6b468432-kube-api-access-sw4zp\") pod \"swift-operator-controller-manager-5f8c65bbfc-jc6r8\" (UID: \"73d3329e-7a93-4d32-b7ba-0d5d6b468432\") " pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-jc6r8" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.099911 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnslp\" (UniqueName: \"kubernetes.io/projected/bfd8649f-6345-40be-9193-e80b2ce0c1dc-kube-api-access-lnslp\") pod \"telemetry-operator-controller-manager-76cc84c6bb-lpjp5\" (UID: \"bfd8649f-6345-40be-9193-e80b2ce0c1dc\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-lpjp5" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.099931 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t55qr\" (UniqueName: \"kubernetes.io/projected/9914167a-34c0-42fc-ac0c-af6f866b437f-kube-api-access-t55qr\") pod \"ovn-operator-controller-manager-b6456fdb6-q9dk8\" (UID: \"9914167a-34c0-42fc-ac0c-af6f866b437f\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-q9dk8" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.099950 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gwjhf\" (UniqueName: \"kubernetes.io/projected/47a5741f-61c5-4de3-b020-50c25f0570f2-kube-api-access-gwjhf\") pod \"placement-operator-controller-manager-78f8948974-gz7v6\" (UID: \"47a5741f-61c5-4de3-b020-50c25f0570f2\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-gz7v6" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.100032 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09479c44-e706-4f72-a1f3-6b71d4b29f0b-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd455bk5\" (UID: \"09479c44-e706-4f72-a1f3-6b71d4b29f0b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" Dec 06 05:42:01 crc kubenswrapper[4706]: E1206 05:42:01.101362 4706 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 06 05:42:01 crc kubenswrapper[4706]: E1206 05:42:01.101398 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/09479c44-e706-4f72-a1f3-6b71d4b29f0b-cert podName:09479c44-e706-4f72-a1f3-6b71d4b29f0b nodeName:}" failed. 
No retries permitted until 2025-12-06 05:42:01.601385119 +0000 UTC m=+1343.929209063 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/09479c44-e706-4f72-a1f3-6b71d4b29f0b-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" (UID: "09479c44-e706-4f72-a1f3-6b71d4b29f0b") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.102017 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-nhzq9" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.115916 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-lpjp5"] Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.123430 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-fcp7z" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.130589 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7c79b5df47-xctf2" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.158909 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wdr4\" (UniqueName: \"kubernetes.io/projected/09479c44-e706-4f72-a1f3-6b71d4b29f0b-kube-api-access-4wdr4\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd455bk5\" (UID: \"09479c44-e706-4f72-a1f3-6b71d4b29f0b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.196817 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-qfpfj" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.200940 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sw4zp\" (UniqueName: \"kubernetes.io/projected/73d3329e-7a93-4d32-b7ba-0d5d6b468432-kube-api-access-sw4zp\") pod \"swift-operator-controller-manager-5f8c65bbfc-jc6r8\" (UID: \"73d3329e-7a93-4d32-b7ba-0d5d6b468432\") " pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-jc6r8" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.200976 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnslp\" (UniqueName: \"kubernetes.io/projected/bfd8649f-6345-40be-9193-e80b2ce0c1dc-kube-api-access-lnslp\") pod \"telemetry-operator-controller-manager-76cc84c6bb-lpjp5\" (UID: \"bfd8649f-6345-40be-9193-e80b2ce0c1dc\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-lpjp5" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.201004 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t55qr\" (UniqueName: \"kubernetes.io/projected/9914167a-34c0-42fc-ac0c-af6f866b437f-kube-api-access-t55qr\") pod \"ovn-operator-controller-manager-b6456fdb6-q9dk8\" (UID: \"9914167a-34c0-42fc-ac0c-af6f866b437f\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-q9dk8" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.201026 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gwjhf\" (UniqueName: \"kubernetes.io/projected/47a5741f-61c5-4de3-b020-50c25f0570f2-kube-api-access-gwjhf\") pod \"placement-operator-controller-manager-78f8948974-gz7v6\" (UID: \"47a5741f-61c5-4de3-b020-50c25f0570f2\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-gz7v6" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.208521 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-998648c74-k5hqn" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.223401 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gwjhf\" (UniqueName: \"kubernetes.io/projected/47a5741f-61c5-4de3-b020-50c25f0570f2-kube-api-access-gwjhf\") pod \"placement-operator-controller-manager-78f8948974-gz7v6\" (UID: \"47a5741f-61c5-4de3-b020-50c25f0570f2\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-gz7v6" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.229925 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-qmnhr"] Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.237936 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-qmnhr" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.241713 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-zcjvj" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.246270 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sw4zp\" (UniqueName: \"kubernetes.io/projected/73d3329e-7a93-4d32-b7ba-0d5d6b468432-kube-api-access-sw4zp\") pod \"swift-operator-controller-manager-5f8c65bbfc-jc6r8\" (UID: \"73d3329e-7a93-4d32-b7ba-0d5d6b468432\") " pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-jc6r8" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.262145 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t55qr\" (UniqueName: \"kubernetes.io/projected/9914167a-34c0-42fc-ac0c-af6f866b437f-kube-api-access-t55qr\") pod \"ovn-operator-controller-manager-b6456fdb6-q9dk8\" (UID: \"9914167a-34c0-42fc-ac0c-af6f866b437f\") " pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-q9dk8" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.262467 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-q9dk8" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.267237 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-qmnhr"] Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.273998 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lnslp\" (UniqueName: \"kubernetes.io/projected/bfd8649f-6345-40be-9193-e80b2ce0c1dc-kube-api-access-lnslp\") pod \"telemetry-operator-controller-manager-76cc84c6bb-lpjp5\" (UID: \"bfd8649f-6345-40be-9193-e80b2ce0c1dc\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-lpjp5" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.287854 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-gz7v6" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.302163 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0e17be2a-d936-4d91-862a-b92014212bf6-cert\") pod \"infra-operator-controller-manager-57548d458d-x7wwl\" (UID: \"0e17be2a-d936-4d91-862a-b92014212bf6\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.302238 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkxqs\" (UniqueName: \"kubernetes.io/projected/ff8a3a6e-0623-417c-8e02-f16f34e3bfe9-kube-api-access-lkxqs\") pod \"test-operator-controller-manager-5854674fcc-qmnhr\" (UID: \"ff8a3a6e-0623-417c-8e02-f16f34e3bfe9\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-qmnhr" Dec 06 05:42:01 crc kubenswrapper[4706]: E1206 05:42:01.302703 4706 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 06 05:42:01 crc kubenswrapper[4706]: E1206 05:42:01.302843 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0e17be2a-d936-4d91-862a-b92014212bf6-cert podName:0e17be2a-d936-4d91-862a-b92014212bf6 nodeName:}" failed. No retries permitted until 2025-12-06 05:42:02.302822662 +0000 UTC m=+1344.630646606 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0e17be2a-d936-4d91-862a-b92014212bf6-cert") pod "infra-operator-controller-manager-57548d458d-x7wwl" (UID: "0e17be2a-d936-4d91-862a-b92014212bf6") : secret "infra-operator-webhook-server-cert" not found Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.303622 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-769dc69bc-tx6k9"] Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.305385 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-tx6k9" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.311609 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-769dc69bc-tx6k9"] Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.317732 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-rdgqq" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.383748 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-jc6r8" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.407878 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkxqs\" (UniqueName: \"kubernetes.io/projected/ff8a3a6e-0623-417c-8e02-f16f34e3bfe9-kube-api-access-lkxqs\") pod \"test-operator-controller-manager-5854674fcc-qmnhr\" (UID: \"ff8a3a6e-0623-417c-8e02-f16f34e3bfe9\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-qmnhr" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.428759 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-lpjp5" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.430624 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4"] Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.431531 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.436081 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkxqs\" (UniqueName: \"kubernetes.io/projected/ff8a3a6e-0623-417c-8e02-f16f34e3bfe9-kube-api-access-lkxqs\") pod \"test-operator-controller-manager-5854674fcc-qmnhr\" (UID: \"ff8a3a6e-0623-417c-8e02-f16f34e3bfe9\") " pod="openstack-operators/test-operator-controller-manager-5854674fcc-qmnhr" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.437633 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.437809 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.437995 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-f4sp5" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.449240 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4"] Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.467373 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-pmfhv"] Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.469129 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-pmfhv" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.470962 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-wk2d4" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.473099 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-pmfhv"] Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.486262 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-5854674fcc-qmnhr" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.509370 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvzhp\" (UniqueName: \"kubernetes.io/projected/1d93b83c-6e45-44bf-b9b1-d6163c85d6b1-kube-api-access-cvzhp\") pod \"watcher-operator-controller-manager-769dc69bc-tx6k9\" (UID: \"1d93b83c-6e45-44bf-b9b1-d6163c85d6b1\") " pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-tx6k9" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.612423 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvzhp\" (UniqueName: \"kubernetes.io/projected/1d93b83c-6e45-44bf-b9b1-d6163c85d6b1-kube-api-access-cvzhp\") pod \"watcher-operator-controller-manager-769dc69bc-tx6k9\" (UID: \"1d93b83c-6e45-44bf-b9b1-d6163c85d6b1\") " pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-tx6k9" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.612467 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95gwt\" (UniqueName: \"kubernetes.io/projected/2d6df005-5a24-47f7-a1a2-a30e6b8ab9fb-kube-api-access-95gwt\") pod \"rabbitmq-cluster-operator-manager-668c99d594-pmfhv\" (UID: \"2d6df005-5a24-47f7-a1a2-a30e6b8ab9fb\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-pmfhv" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.612491 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-metrics-certs\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.612571 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7gp57\" (UniqueName: \"kubernetes.io/projected/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-kube-api-access-7gp57\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.612598 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.612625 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09479c44-e706-4f72-a1f3-6b71d4b29f0b-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd455bk5\" (UID: \"09479c44-e706-4f72-a1f3-6b71d4b29f0b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" Dec 06 05:42:01 crc kubenswrapper[4706]: E1206 05:42:01.612742 4706 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret 
"openstack-baremetal-operator-webhook-server-cert" not found Dec 06 05:42:01 crc kubenswrapper[4706]: E1206 05:42:01.612790 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/09479c44-e706-4f72-a1f3-6b71d4b29f0b-cert podName:09479c44-e706-4f72-a1f3-6b71d4b29f0b nodeName:}" failed. No retries permitted until 2025-12-06 05:42:02.61277683 +0000 UTC m=+1344.940600774 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/09479c44-e706-4f72-a1f3-6b71d4b29f0b-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" (UID: "09479c44-e706-4f72-a1f3-6b71d4b29f0b") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.642674 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvzhp\" (UniqueName: \"kubernetes.io/projected/1d93b83c-6e45-44bf-b9b1-d6163c85d6b1-kube-api-access-cvzhp\") pod \"watcher-operator-controller-manager-769dc69bc-tx6k9\" (UID: \"1d93b83c-6e45-44bf-b9b1-d6163c85d6b1\") " pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-tx6k9" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.714808 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7gp57\" (UniqueName: \"kubernetes.io/projected/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-kube-api-access-7gp57\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.715161 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.715212 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95gwt\" (UniqueName: \"kubernetes.io/projected/2d6df005-5a24-47f7-a1a2-a30e6b8ab9fb-kube-api-access-95gwt\") pod \"rabbitmq-cluster-operator-manager-668c99d594-pmfhv\" (UID: \"2d6df005-5a24-47f7-a1a2-a30e6b8ab9fb\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-pmfhv" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.715235 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-metrics-certs\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4" Dec 06 05:42:01 crc kubenswrapper[4706]: E1206 05:42:01.715375 4706 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 06 05:42:01 crc kubenswrapper[4706]: E1206 05:42:01.715422 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-metrics-certs podName:36973f56-f6d5-4a12-b86e-4ad7bcb3df6f nodeName:}" failed. 
No retries permitted until 2025-12-06 05:42:02.215407895 +0000 UTC m=+1344.543231839 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-metrics-certs") pod "openstack-operator-controller-manager-7f6f47b7b7-lmnn4" (UID: "36973f56-f6d5-4a12-b86e-4ad7bcb3df6f") : secret "metrics-server-cert" not found Dec 06 05:42:01 crc kubenswrapper[4706]: E1206 05:42:01.715666 4706 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 06 05:42:01 crc kubenswrapper[4706]: E1206 05:42:01.715695 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs podName:36973f56-f6d5-4a12-b86e-4ad7bcb3df6f nodeName:}" failed. No retries permitted until 2025-12-06 05:42:02.215688222 +0000 UTC m=+1344.543512156 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs") pod "openstack-operator-controller-manager-7f6f47b7b7-lmnn4" (UID: "36973f56-f6d5-4a12-b86e-4ad7bcb3df6f") : secret "webhook-server-cert" not found Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.745285 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7gp57\" (UniqueName: \"kubernetes.io/projected/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-kube-api-access-7gp57\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.745791 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95gwt\" (UniqueName: \"kubernetes.io/projected/2d6df005-5a24-47f7-a1a2-a30e6b8ab9fb-kube-api-access-95gwt\") pod \"rabbitmq-cluster-operator-manager-668c99d594-pmfhv\" (UID: \"2d6df005-5a24-47f7-a1a2-a30e6b8ab9fb\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-pmfhv" Dec 06 05:42:01 crc kubenswrapper[4706]: I1206 05:42:01.958198 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-tx6k9" Dec 06 05:42:02 crc kubenswrapper[4706]: I1206 05:42:02.040284 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-pmfhv" Dec 06 05:42:02 crc kubenswrapper[4706]: I1206 05:42:02.205727 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-mpkjv"] Dec 06 05:42:02 crc kubenswrapper[4706]: I1206 05:42:02.228159 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7765d96ddf-jvwv2"] Dec 06 05:42:02 crc kubenswrapper[4706]: I1206 05:42:02.233000 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4" Dec 06 05:42:02 crc kubenswrapper[4706]: I1206 05:42:02.233088 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-metrics-certs\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4" Dec 06 05:42:02 crc kubenswrapper[4706]: E1206 05:42:02.233242 4706 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 06 05:42:02 crc kubenswrapper[4706]: E1206 05:42:02.233286 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-metrics-certs podName:36973f56-f6d5-4a12-b86e-4ad7bcb3df6f nodeName:}" failed. No retries permitted until 2025-12-06 05:42:03.233272465 +0000 UTC m=+1345.561096409 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-metrics-certs") pod "openstack-operator-controller-manager-7f6f47b7b7-lmnn4" (UID: "36973f56-f6d5-4a12-b86e-4ad7bcb3df6f") : secret "metrics-server-cert" not found Dec 06 05:42:02 crc kubenswrapper[4706]: E1206 05:42:02.233387 4706 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 06 05:42:02 crc kubenswrapper[4706]: E1206 05:42:02.233459 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs podName:36973f56-f6d5-4a12-b86e-4ad7bcb3df6f nodeName:}" failed. No retries permitted until 2025-12-06 05:42:03.233437749 +0000 UTC m=+1345.561261733 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs") pod "openstack-operator-controller-manager-7f6f47b7b7-lmnn4" (UID: "36973f56-f6d5-4a12-b86e-4ad7bcb3df6f") : secret "webhook-server-cert" not found Dec 06 05:42:02 crc kubenswrapper[4706]: I1206 05:42:02.251656 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-qr75r"] Dec 06 05:42:02 crc kubenswrapper[4706]: I1206 05:42:02.333803 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0e17be2a-d936-4d91-862a-b92014212bf6-cert\") pod \"infra-operator-controller-manager-57548d458d-x7wwl\" (UID: \"0e17be2a-d936-4d91-862a-b92014212bf6\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl" Dec 06 05:42:02 crc kubenswrapper[4706]: E1206 05:42:02.334002 4706 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 06 05:42:02 crc kubenswrapper[4706]: E1206 05:42:02.334071 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0e17be2a-d936-4d91-862a-b92014212bf6-cert podName:0e17be2a-d936-4d91-862a-b92014212bf6 nodeName:}" failed. No retries permitted until 2025-12-06 05:42:04.334039057 +0000 UTC m=+1346.661863002 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0e17be2a-d936-4d91-862a-b92014212bf6-cert") pod "infra-operator-controller-manager-57548d458d-x7wwl" (UID: "0e17be2a-d936-4d91-862a-b92014212bf6") : secret "infra-operator-webhook-server-cert" not found Dec 06 05:42:02 crc kubenswrapper[4706]: I1206 05:42:02.387470 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-859b6ccc6-msm2n"] Dec 06 05:42:02 crc kubenswrapper[4706]: I1206 05:42:02.411566 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-6c548fd776-jspvh"] Dec 06 05:42:02 crc kubenswrapper[4706]: I1206 05:42:02.638241 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09479c44-e706-4f72-a1f3-6b71d4b29f0b-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd455bk5\" (UID: \"09479c44-e706-4f72-a1f3-6b71d4b29f0b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" Dec 06 05:42:02 crc kubenswrapper[4706]: E1206 05:42:02.638388 4706 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 06 05:42:02 crc kubenswrapper[4706]: E1206 05:42:02.638609 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/09479c44-e706-4f72-a1f3-6b71d4b29f0b-cert podName:09479c44-e706-4f72-a1f3-6b71d4b29f0b nodeName:}" failed. No retries permitted until 2025-12-06 05:42:04.638592799 +0000 UTC m=+1346.966416743 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/09479c44-e706-4f72-a1f3-6b71d4b29f0b-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" (UID: "09479c44-e706-4f72-a1f3-6b71d4b29f0b") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 06 05:42:02 crc kubenswrapper[4706]: I1206 05:42:02.932897 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-jvwv2" event={"ID":"34163fc1-16c7-4942-9eda-5afb77180d00","Type":"ContainerStarted","Data":"cbe0e7b0328e7e3c12dbd8970efc71001372a24a1050d998a12235e2809406d3"} Dec 06 05:42:02 crc kubenswrapper[4706]: I1206 05:42:02.933711 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-qr75r" event={"ID":"646d8bbb-f505-42f9-a23d-15b999c5acce","Type":"ContainerStarted","Data":"440beffb1e932fbcee717bc678257dd5ca9c170a555c1a7e77bfe9d00daa9f35"} Dec 06 05:42:02 crc kubenswrapper[4706]: I1206 05:42:02.934889 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-mpkjv" event={"ID":"31b78248-5727-4a30-95ab-d75acc5a752b","Type":"ContainerStarted","Data":"2c78463d9b3913ec62e33ee23e846a2fc77fb1cea1b76b25e21b6896d73be459"} Dec 06 05:42:02 crc kubenswrapper[4706]: I1206 05:42:02.935843 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-jspvh" event={"ID":"eacc98a4-22bf-4a38-8de0-2bf6fd395572","Type":"ContainerStarted","Data":"a14718ed80cee80dddfd4a5685e22c1cf1236dd8fc4dcc08116a38475b4eef21"} Dec 06 05:42:02 crc kubenswrapper[4706]: I1206 05:42:02.936833 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-msm2n" event={"ID":"9e547dc3-41db-48ab-b791-885c0f98f4c8","Type":"ContainerStarted","Data":"84e9920949b639bbd206c5d09a6da5157d7d8dbb59b66bd7d23575378f967398"} Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.250487 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4" Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.250607 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-metrics-certs\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4" Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.250670 4706 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.250749 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs podName:36973f56-f6d5-4a12-b86e-4ad7bcb3df6f nodeName:}" failed. No retries permitted until 2025-12-06 05:42:05.250730413 +0000 UTC m=+1347.578554357 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs") pod "openstack-operator-controller-manager-7f6f47b7b7-lmnn4" (UID: "36973f56-f6d5-4a12-b86e-4ad7bcb3df6f") : secret "webhook-server-cert" not found Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.250759 4706 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.250812 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-metrics-certs podName:36973f56-f6d5-4a12-b86e-4ad7bcb3df6f nodeName:}" failed. No retries permitted until 2025-12-06 05:42:05.250798044 +0000 UTC m=+1347.578621978 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-metrics-certs") pod "openstack-operator-controller-manager-7f6f47b7b7-lmnn4" (UID: "36973f56-f6d5-4a12-b86e-4ad7bcb3df6f") : secret "metrics-server-cert" not found Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.264730 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-77987cd8cd-vm2sj"] Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.285392 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-998648c74-k5hqn"] Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.298426 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7c79b5df47-xctf2"] Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.307798 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-697bc559fc-qfpfj"] Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.316274 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-bkvhv"] Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.322307 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-lpjp5"] Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.331583 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-769dc69bc-tx6k9"] Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.339811 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-78b4bc895b-wzlpz"] Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.380499 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-pmfhv"] Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.422584 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mqwsm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-998648c74-k5hqn_openstack-operators(b980759b-88cf-47ee-b7b0-12ebaddba6cd): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.422731 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lkxqs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-qmnhr_openstack-operators(ff8a3a6e-0623-417c-8e02-f16f34e3bfe9): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.422844 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sw4zp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-5f8c65bbfc-jc6r8_openstack-operators(73d3329e-7a93-4d32-b7ba-0d5d6b468432): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.428827 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-mqwsm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-998648c74-k5hqn_openstack-operators(b980759b-88cf-47ee-b7b0-12ebaddba6cd): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.428987 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sw4zp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-5f8c65bbfc-jc6r8_openstack-operators(73d3329e-7a93-4d32-b7ba-0d5d6b468432): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.430226 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-jc6r8" podUID="73d3329e-7a93-4d32-b7ba-0d5d6b468432" Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.430308 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/octavia-operator-controller-manager-998648c74-k5hqn" podUID="b980759b-88cf-47ee-b7b0-12ebaddba6cd" Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.431794 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-nhzq9"] Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.436773 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-lkxqs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-5854674fcc-qmnhr_openstack-operators(ff8a3a6e-0623-417c-8e02-f16f34e3bfe9): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.439135 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-qmnhr" podUID="ff8a3a6e-0623-417c-8e02-f16f34e3bfe9" Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.447493 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pdkmt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-697bc559fc-qfpfj_openstack-operators(0928e1f4-7912-465f-a991-9d0dda0a42d1): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.450991 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f8c65bbfc-jc6r8"] Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.451282 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-pdkmt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-697bc559fc-qfpfj_openstack-operators(0928e1f4-7912-465f-a991-9d0dda0a42d1): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.451464 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:9f68d7bc8c6bce38f46dee8a8272d5365c49fe7b32b2af52e8ac884e212f3a85,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bmfwg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-78b4bc895b-wzlpz_openstack-operators(74049eb3-6721-4234-80cd-01b530d2d9e5): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.453878 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-qfpfj" podUID="0928e1f4-7912-465f-a991-9d0dda0a42d1" Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.457526 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-5854674fcc-qmnhr"] Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.467009 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bmfwg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-78b4bc895b-wzlpz_openstack-operators(74049eb3-6721-4234-80cd-01b530d2d9e5): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.468190 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-wzlpz" podUID="74049eb3-6721-4234-80cd-01b530d2d9e5" Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.473208 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-gz7v6"] Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.481107 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-b6456fdb6-q9dk8"] Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.489526 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-fcp7z"] Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.963032 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-tx6k9" event={"ID":"1d93b83c-6e45-44bf-b9b1-d6163c85d6b1","Type":"ContainerStarted","Data":"52bec10deea0769c531d469be2ef651448f4d0fcc68a92d9d241f0eb4353133c"} Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.965729 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-pmfhv" event={"ID":"2d6df005-5a24-47f7-a1a2-a30e6b8ab9fb","Type":"ContainerStarted","Data":"38592e6b02e366ccb527cf215ffe517dcac1b3266e37aebc4eb7afef9ac3dca9"} Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.967732 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-jc6r8" event={"ID":"73d3329e-7a93-4d32-b7ba-0d5d6b468432","Type":"ContainerStarted","Data":"97e433d59fe681df05b484aeb6498640d55eaf4b6362ddfa83872cf53f2b4831"} Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.987214 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: 
\"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-jc6r8" podUID="73d3329e-7a93-4d32-b7ba-0d5d6b468432" Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.987451 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-nhzq9" event={"ID":"5f25d928-9f7a-4d1b-b1bb-abc58dad2080","Type":"ContainerStarted","Data":"8e6298d4123d2d81e54ed7445ae0aac982f739b1d4535a15e11a60e15ea28c1b"} Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.988645 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-gz7v6" event={"ID":"47a5741f-61c5-4de3-b020-50c25f0570f2","Type":"ContainerStarted","Data":"d7f80c29fe93da7d86c034518645a36708dd3c8f608cd7ca098b29e7bb762d33"} Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.989726 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-qfpfj" event={"ID":"0928e1f4-7912-465f-a991-9d0dda0a42d1","Type":"ContainerStarted","Data":"1e343c56bfe39c8b60c41c17167df39bb437733272c1b0bd94ec6f4f8fa5920e"} Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.991539 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-qfpfj" podUID="0928e1f4-7912-465f-a991-9d0dda0a42d1" Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.991619 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-q9dk8" event={"ID":"9914167a-34c0-42fc-ac0c-af6f866b437f","Type":"ContainerStarted","Data":"57ee361d523c3f63aa3397db09906b6ca0c895cc28098c99ea89b5d0f931dd3e"} Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.992422 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-bkvhv" event={"ID":"d28af7d8-b64b-48f1-9ac1-7f1cfc361751","Type":"ContainerStarted","Data":"67194e1bf02b8d1e34c0f8bcf94263ce873d63dd188b4797b15cdd61ffc3e72c"} Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.993908 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-wzlpz" event={"ID":"74049eb3-6721-4234-80cd-01b530d2d9e5","Type":"ContainerStarted","Data":"89e9653ee495abc3237be657650f5af8e8ada974c3d424af2eb1d1a7a88861b0"} Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.995098 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-lpjp5" event={"ID":"bfd8649f-6345-40be-9193-e80b2ce0c1dc","Type":"ContainerStarted","Data":"7917507206d302a003fb7d505045f7b8031fd402440293534c8f4bc75a1f5087"} Dec 06 05:42:03 crc kubenswrapper[4706]: E1206 05:42:03.995173 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/designate-operator@sha256:9f68d7bc8c6bce38f46dee8a8272d5365c49fe7b32b2af52e8ac884e212f3a85\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-wzlpz" podUID="74049eb3-6721-4234-80cd-01b530d2d9e5" Dec 06 05:42:03 crc kubenswrapper[4706]: I1206 05:42:03.997346 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7c79b5df47-xctf2" event={"ID":"b67589f2-8ee8-43a3-aaf9-e1767c0a75c5","Type":"ContainerStarted","Data":"a0deb2713c6cce6cbc44e2753384e8ce4a25ce5d35ddb978c78d38d9020b61d2"} Dec 06 05:42:04 crc kubenswrapper[4706]: I1206 05:42:04.008905 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-k5hqn" event={"ID":"b980759b-88cf-47ee-b7b0-12ebaddba6cd","Type":"ContainerStarted","Data":"7047cf6b8304087c7a96d005be12a6f3cb43a8aed86a48d7b58ffa7cf338a3dc"} Dec 06 05:42:04 crc kubenswrapper[4706]: I1206 05:42:04.010511 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-fcp7z" event={"ID":"b6524ab6-7d15-4cf4-b3b2-dc9f0d014930","Type":"ContainerStarted","Data":"1487c176de6cc52f8d610375c01034a6fd3885b283cfc31a4f85531f43187053"} Dec 06 05:42:04 crc kubenswrapper[4706]: I1206 05:42:04.012620 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-77987cd8cd-vm2sj" event={"ID":"de139c22-08fa-4b45-abda-af9394c16eac","Type":"ContainerStarted","Data":"d0b89e4372603d1dfda29df1d28e85720a686d0b227a2b018af4ba62d2d89ef6"} Dec 06 05:42:04 crc kubenswrapper[4706]: E1206 05:42:04.025332 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-998648c74-k5hqn" podUID="b980759b-88cf-47ee-b7b0-12ebaddba6cd" Dec 06 05:42:04 crc kubenswrapper[4706]: I1206 05:42:04.026584 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-qmnhr" event={"ID":"ff8a3a6e-0623-417c-8e02-f16f34e3bfe9","Type":"ContainerStarted","Data":"6e8869188d284f8b2a253527571c4e406b0e8cbbd0b849300843dbdf43b7342a"} Dec 06 05:42:04 crc kubenswrapper[4706]: E1206 05:42:04.049417 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-qmnhr" podUID="ff8a3a6e-0623-417c-8e02-f16f34e3bfe9" Dec 06 05:42:04 crc kubenswrapper[4706]: I1206 05:42:04.373976 4706 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0e17be2a-d936-4d91-862a-b92014212bf6-cert\") pod \"infra-operator-controller-manager-57548d458d-x7wwl\" (UID: \"0e17be2a-d936-4d91-862a-b92014212bf6\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl" Dec 06 05:42:04 crc kubenswrapper[4706]: E1206 05:42:04.374218 4706 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Dec 06 05:42:04 crc kubenswrapper[4706]: E1206 05:42:04.374266 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0e17be2a-d936-4d91-862a-b92014212bf6-cert podName:0e17be2a-d936-4d91-862a-b92014212bf6 nodeName:}" failed. No retries permitted until 2025-12-06 05:42:08.374251859 +0000 UTC m=+1350.702075803 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0e17be2a-d936-4d91-862a-b92014212bf6-cert") pod "infra-operator-controller-manager-57548d458d-x7wwl" (UID: "0e17be2a-d936-4d91-862a-b92014212bf6") : secret "infra-operator-webhook-server-cert" not found Dec 06 05:42:04 crc kubenswrapper[4706]: I1206 05:42:04.688013 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09479c44-e706-4f72-a1f3-6b71d4b29f0b-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd455bk5\" (UID: \"09479c44-e706-4f72-a1f3-6b71d4b29f0b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" Dec 06 05:42:04 crc kubenswrapper[4706]: E1206 05:42:04.688516 4706 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 06 05:42:04 crc kubenswrapper[4706]: E1206 05:42:04.688573 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/09479c44-e706-4f72-a1f3-6b71d4b29f0b-cert podName:09479c44-e706-4f72-a1f3-6b71d4b29f0b nodeName:}" failed. No retries permitted until 2025-12-06 05:42:08.688555427 +0000 UTC m=+1351.016379371 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/09479c44-e706-4f72-a1f3-6b71d4b29f0b-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" (UID: "09479c44-e706-4f72-a1f3-6b71d4b29f0b") : secret "openstack-baremetal-operator-webhook-server-cert" not found Dec 06 05:42:05 crc kubenswrapper[4706]: E1206 05:42:05.045743 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:d9a3694865a7d54ee96397add18c3898886e98d079aa20876a0f4de1fa7a7168\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/octavia-operator-controller-manager-998648c74-k5hqn" podUID="b980759b-88cf-47ee-b7b0-12ebaddba6cd" Dec 06 05:42:05 crc kubenswrapper[4706]: E1206 05:42:05.046114 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-jc6r8" podUID="73d3329e-7a93-4d32-b7ba-0d5d6b468432" Dec 06 05:42:05 crc kubenswrapper[4706]: E1206 05:42:05.046187 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/designate-operator@sha256:9f68d7bc8c6bce38f46dee8a8272d5365c49fe7b32b2af52e8ac884e212f3a85\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-wzlpz" podUID="74049eb3-6721-4234-80cd-01b530d2d9e5" Dec 06 05:42:05 crc kubenswrapper[4706]: E1206 05:42:05.047306 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:101b3e007d8c9f2e183262d7712f986ad51256448099069bc14f1ea5f997ab94\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-5854674fcc-qmnhr" podUID="ff8a3a6e-0623-417c-8e02-f16f34e3bfe9" Dec 06 05:42:05 crc kubenswrapper[4706]: E1206 05:42:05.047419 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:779f0cee6024d0fb8f259b036fe790e62aa5a3b0431ea9bf15a6e7d02e2e5670\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-qfpfj" podUID="0928e1f4-7912-465f-a991-9d0dda0a42d1" Dec 06 05:42:05 crc 
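[The "pull QPS exceeded" failures above are the kubelet's own client-side image-pull throttle, not a registry error: with roughly twenty operator pods scheduled at once, the pull requests outrun the token bucket the kubelet builds from the KubeletConfiguration fields registryPullQPS and registryBurst, and the excess pulls fail immediately and fall into back-off. A minimal sketch of that behavior, assuming the stock defaults (registryPullQPS=5, registryBurst=10) and using the client-go token-bucket limiter; this is an illustration of the mechanism, not kubelet source:

package main

import (
	"fmt"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// Assumed defaults from KubeletConfiguration: registryPullQPS=5, registryBurst=10.
	limiter := flowcontrol.NewTokenBucketRateLimiter(5, 10)

	// Fire 15 pull attempts back to back: the first 10 drain the burst
	// allowance, the rest are rejected, which the kubelet reports as
	// ErrImagePull: "pull QPS exceeded".
	for i := 1; i <= 15; i++ {
		if limiter.TryAccept() {
			fmt.Printf("pull %2d: admitted\n", i)
		} else {
			fmt.Printf("pull %2d: pull QPS exceeded\n", i)
		}
	}
}

Setting registryPullQPS to 0 would disable the throttle entirely; in this log the node simply rides out the resulting ImagePullBackOff cycle instead.]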
Dec 06 05:42:05 crc kubenswrapper[4706]: I1206 05:42:05.316717 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4"
Dec 06 05:42:05 crc kubenswrapper[4706]: I1206 05:42:05.316811 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-metrics-certs\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4"
Dec 06 05:42:05 crc kubenswrapper[4706]: E1206 05:42:05.316913 4706 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Dec 06 05:42:05 crc kubenswrapper[4706]: E1206 05:42:05.316990 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs podName:36973f56-f6d5-4a12-b86e-4ad7bcb3df6f nodeName:}" failed. No retries permitted until 2025-12-06 05:42:09.316970458 +0000 UTC m=+1351.644794402 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs") pod "openstack-operator-controller-manager-7f6f47b7b7-lmnn4" (UID: "36973f56-f6d5-4a12-b86e-4ad7bcb3df6f") : secret "webhook-server-cert" not found
Dec 06 05:42:05 crc kubenswrapper[4706]: E1206 05:42:05.316993 4706 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Dec 06 05:42:05 crc kubenswrapper[4706]: E1206 05:42:05.317069 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-metrics-certs podName:36973f56-f6d5-4a12-b86e-4ad7bcb3df6f nodeName:}" failed. No retries permitted until 2025-12-06 05:42:09.31702932 +0000 UTC m=+1351.644853264 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-metrics-certs") pod "openstack-operator-controller-manager-7f6f47b7b7-lmnn4" (UID: "36973f56-f6d5-4a12-b86e-4ad7bcb3df6f") : secret "metrics-server-cert" not found
Dec 06 05:42:08 crc kubenswrapper[4706]: I1206 05:42:08.470935 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0e17be2a-d936-4d91-862a-b92014212bf6-cert\") pod \"infra-operator-controller-manager-57548d458d-x7wwl\" (UID: \"0e17be2a-d936-4d91-862a-b92014212bf6\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl"
Dec 06 05:42:08 crc kubenswrapper[4706]: E1206 05:42:08.471584 4706 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Dec 06 05:42:08 crc kubenswrapper[4706]: E1206 05:42:08.471737 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0e17be2a-d936-4d91-862a-b92014212bf6-cert podName:0e17be2a-d936-4d91-862a-b92014212bf6 nodeName:}" failed. No retries permitted until 2025-12-06 05:42:16.47171944 +0000 UTC m=+1358.799543384 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0e17be2a-d936-4d91-862a-b92014212bf6-cert") pod "infra-operator-controller-manager-57548d458d-x7wwl" (UID: "0e17be2a-d936-4d91-862a-b92014212bf6") : secret "infra-operator-webhook-server-cert" not found
Dec 06 05:42:08 crc kubenswrapper[4706]: I1206 05:42:08.776707 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09479c44-e706-4f72-a1f3-6b71d4b29f0b-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd455bk5\" (UID: \"09479c44-e706-4f72-a1f3-6b71d4b29f0b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5"
Dec 06 05:42:08 crc kubenswrapper[4706]: E1206 05:42:08.776851 4706 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found
Dec 06 05:42:08 crc kubenswrapper[4706]: E1206 05:42:08.776903 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/09479c44-e706-4f72-a1f3-6b71d4b29f0b-cert podName:09479c44-e706-4f72-a1f3-6b71d4b29f0b nodeName:}" failed. No retries permitted until 2025-12-06 05:42:16.776889468 +0000 UTC m=+1359.104713412 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/09479c44-e706-4f72-a1f3-6b71d4b29f0b-cert") pod "openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" (UID: "09479c44-e706-4f72-a1f3-6b71d4b29f0b") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Dec 06 05:42:09 crc kubenswrapper[4706]: I1206 05:42:09.384847 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4"
Dec 06 05:42:09 crc kubenswrapper[4706]: I1206 05:42:09.384943 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-metrics-certs\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4"
Dec 06 05:42:09 crc kubenswrapper[4706]: E1206 05:42:09.385085 4706 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Dec 06 05:42:09 crc kubenswrapper[4706]: E1206 05:42:09.385140 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-metrics-certs podName:36973f56-f6d5-4a12-b86e-4ad7bcb3df6f nodeName:}" failed. No retries permitted until 2025-12-06 05:42:17.385123765 +0000 UTC m=+1359.712947709 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-metrics-certs") pod "openstack-operator-controller-manager-7f6f47b7b7-lmnn4" (UID: "36973f56-f6d5-4a12-b86e-4ad7bcb3df6f") : secret "metrics-server-cert" not found
Dec 06 05:42:09 crc kubenswrapper[4706]: E1206 05:42:09.385135 4706 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Dec 06 05:42:09 crc kubenswrapper[4706]: E1206 05:42:09.385224 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs podName:36973f56-f6d5-4a12-b86e-4ad7bcb3df6f nodeName:}" failed. No retries permitted until 2025-12-06 05:42:17.385200887 +0000 UTC m=+1359.713024881 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs") pod "openstack-operator-controller-manager-7f6f47b7b7-lmnn4" (UID: "36973f56-f6d5-4a12-b86e-4ad7bcb3df6f") : secret "webhook-server-cert" not found
Dec 06 05:42:16 crc kubenswrapper[4706]: I1206 05:42:16.494573 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0e17be2a-d936-4d91-862a-b92014212bf6-cert\") pod \"infra-operator-controller-manager-57548d458d-x7wwl\" (UID: \"0e17be2a-d936-4d91-862a-b92014212bf6\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl"
Dec 06 05:42:16 crc kubenswrapper[4706]: I1206 05:42:16.503129 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0e17be2a-d936-4d91-862a-b92014212bf6-cert\") pod \"infra-operator-controller-manager-57548d458d-x7wwl\" (UID: \"0e17be2a-d936-4d91-862a-b92014212bf6\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl"
Dec 06 05:42:16 crc kubenswrapper[4706]: I1206 05:42:16.771741 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl"
Dec 06 05:42:16 crc kubenswrapper[4706]: I1206 05:42:16.799397 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09479c44-e706-4f72-a1f3-6b71d4b29f0b-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd455bk5\" (UID: \"09479c44-e706-4f72-a1f3-6b71d4b29f0b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5"
Dec 06 05:42:16 crc kubenswrapper[4706]: I1206 05:42:16.816851 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/09479c44-e706-4f72-a1f3-6b71d4b29f0b-cert\") pod \"openstack-baremetal-operator-controller-manager-64bc77cfd455bk5\" (UID: \"09479c44-e706-4f72-a1f3-6b71d4b29f0b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5"
Dec 06 05:42:16 crc kubenswrapper[4706]: I1206 05:42:16.833973 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5"
Dec 06 05:42:17 crc kubenswrapper[4706]: I1206 05:42:17.405150 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4"
Dec 06 05:42:17 crc kubenswrapper[4706]: I1206 05:42:17.405222 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-metrics-certs\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4"
Dec 06 05:42:17 crc kubenswrapper[4706]: E1206 05:42:17.405331 4706 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Dec 06 05:42:17 crc kubenswrapper[4706]: E1206 05:42:17.405418 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs podName:36973f56-f6d5-4a12-b86e-4ad7bcb3df6f nodeName:}" failed. No retries permitted until 2025-12-06 05:42:33.405397677 +0000 UTC m=+1375.733221641 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs") pod "openstack-operator-controller-manager-7f6f47b7b7-lmnn4" (UID: "36973f56-f6d5-4a12-b86e-4ad7bcb3df6f") : secret "webhook-server-cert" not found
Dec 06 05:42:17 crc kubenswrapper[4706]: I1206 05:42:17.408504 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-metrics-certs\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4"
Dec 06 05:42:33 crc kubenswrapper[4706]: I1206 05:42:33.441267 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4"
Dec 06 05:42:33 crc kubenswrapper[4706]: I1206 05:42:33.455196 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/36973f56-f6d5-4a12-b86e-4ad7bcb3df6f-webhook-certs\") pod \"openstack-operator-controller-manager-7f6f47b7b7-lmnn4\" (UID: \"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f\") " pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4"
Dec 06 05:42:33 crc kubenswrapper[4706]: I1206 05:42:33.518873 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4"
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4" Dec 06 05:42:41 crc kubenswrapper[4706]: E1206 05:42:41.573457 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/mariadb-operator@sha256:600ca007e493d3af0fcc2ebac92e8da5efd2afe812b62d7d3d4dd0115bdf05d7" Dec 06 05:42:41 crc kubenswrapper[4706]: E1206 05:42:41.574176 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/mariadb-operator@sha256:600ca007e493d3af0fcc2ebac92e8da5efd2afe812b62d7d3d4dd0115bdf05d7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2mz99,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-56bbcc9d85-nhzq9_openstack-operators(5f25d928-9f7a-4d1b-b1bb-abc58dad2080): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:42:42 crc kubenswrapper[4706]: E1206 05:42:42.180688 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59" Dec 06 05:42:42 crc kubenswrapper[4706]: E1206 05:42:42.180885 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:635a4aef9d6f0b799e8ec91333dbb312160c001d05b3c63f614c124e0b67cb59,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-t55qr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-b6456fdb6-q9dk8_openstack-operators(9914167a-34c0-42fc-ac0c-af6f866b437f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:42:42 crc kubenswrapper[4706]: E1206 05:42:42.763002 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f" Dec 06 05:42:42 crc kubenswrapper[4706]: E1206 05:42:42.763922 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} 
{} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gwjhf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-78f8948974-gz7v6_openstack-operators(47a5741f-61c5-4de3-b020-50c25f0570f2): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:42:43 crc kubenswrapper[4706]: E1206 05:42:43.294533 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557" Dec 06 05:42:43 crc kubenswrapper[4706]: E1206 05:42:43.294720 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-58wvq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-5fdfd5b6b5-bkvhv_openstack-operators(d28af7d8-b64b-48f1-9ac1-7f1cfc361751): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:42:43 crc kubenswrapper[4706]: E1206 05:42:43.666946 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:abdb733b01e92ac17f565762f30f1d075b44c16421bd06e557f6bb3c319e1809" Dec 06 05:42:43 crc kubenswrapper[4706]: E1206 05:42:43.667179 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:abdb733b01e92ac17f565762f30f1d075b44c16421bd06e557f6bb3c319e1809,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-szbdq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-77987cd8cd-vm2sj_openstack-operators(de139c22-08fa-4b45-abda-af9394c16eac): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:42:44 crc kubenswrapper[4706]: E1206 05:42:44.386695 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Dec 06 05:42:44 crc kubenswrapper[4706]: E1206 05:42:44.386891 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-95gwt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-pmfhv_openstack-operators(2d6df005-5a24-47f7-a1a2-a30e6b8ab9fb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" 
logger="UnhandledError" Dec 06 05:42:44 crc kubenswrapper[4706]: E1206 05:42:44.388195 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-pmfhv" podUID="2d6df005-5a24-47f7-a1a2-a30e6b8ab9fb" Dec 06 05:42:44 crc kubenswrapper[4706]: E1206 05:42:44.816193 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:72ad6517987f674af0d0ae092cbb874aeae909c8b8b60188099c311762ebc8f7" Dec 06 05:42:44 crc kubenswrapper[4706]: E1206 05:42:44.816370 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:72ad6517987f674af0d0ae092cbb874aeae909c8b8b60188099c311762ebc8f7,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nxh7b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-7765d96ddf-jvwv2_openstack-operators(34163fc1-16c7-4942-9eda-5afb77180d00): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:42:45 crc kubenswrapper[4706]: E1206 05:42:45.318303 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-pmfhv" podUID="2d6df005-5a24-47f7-a1a2-a30e6b8ab9fb" Dec 06 05:42:47 crc kubenswrapper[4706]: I1206 05:42:47.559460 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4"] Dec 06 05:42:47 crc kubenswrapper[4706]: I1206 05:42:47.618633 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5"] Dec 06 05:42:47 crc kubenswrapper[4706]: I1206 05:42:47.664143 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl"] Dec 06 05:42:47 crc kubenswrapper[4706]: W1206 05:42:47.834737 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod36973f56_f6d5_4a12_b86e_4ad7bcb3df6f.slice/crio-d8950646da8a5d8f5c87afcf37877c831e42973568d310a20cb7e8356ccd5ea0 WatchSource:0}: Error finding container d8950646da8a5d8f5c87afcf37877c831e42973568d310a20cb7e8356ccd5ea0: Status 404 returned error can't find the container with id d8950646da8a5d8f5c87afcf37877c831e42973568d310a20cb7e8356ccd5ea0 Dec 06 05:42:47 crc kubenswrapper[4706]: W1206 05:42:47.837612 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod09479c44_e706_4f72_a1f3_6b71d4b29f0b.slice/crio-dcda47f3897c776aac1c0164066777232ac91bca0956134c9a09c3ba0c46eaa2 WatchSource:0}: Error finding container dcda47f3897c776aac1c0164066777232ac91bca0956134c9a09c3ba0c46eaa2: Status 404 returned error can't find the container with id dcda47f3897c776aac1c0164066777232ac91bca0956134c9a09c3ba0c46eaa2 Dec 06 05:42:47 crc kubenswrapper[4706]: W1206 05:42:47.838717 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0e17be2a_d936_4d91_862a_b92014212bf6.slice/crio-983189648c8e1248fdc4d468d3859d443c56fa0cc60e40a5a98ac0c67c996ff5 WatchSource:0}: Error finding container 983189648c8e1248fdc4d468d3859d443c56fa0cc60e40a5a98ac0c67c996ff5: Status 404 returned error can't find the container with id 983189648c8e1248fdc4d468d3859d443c56fa0cc60e40a5a98ac0c67c996ff5 Dec 06 05:42:48 crc kubenswrapper[4706]: I1206 05:42:48.360873 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" event={"ID":"09479c44-e706-4f72-a1f3-6b71d4b29f0b","Type":"ContainerStarted","Data":"dcda47f3897c776aac1c0164066777232ac91bca0956134c9a09c3ba0c46eaa2"} Dec 06 05:42:48 crc kubenswrapper[4706]: I1206 05:42:48.362557 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-lpjp5" event={"ID":"bfd8649f-6345-40be-9193-e80b2ce0c1dc","Type":"ContainerStarted","Data":"b0c3292e39e756d3fc300c6ff3707846bac2c21824adf566b63a81ef61280c65"} Dec 06 05:42:48 crc kubenswrapper[4706]: I1206 05:42:48.367422 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-mpkjv" event={"ID":"31b78248-5727-4a30-95ab-d75acc5a752b","Type":"ContainerStarted","Data":"8c3dd4083e359b42a9b2981e5cefac4a5faf6963ceae69faef03098ef9967ccc"} Dec 06 
05:42:48 crc kubenswrapper[4706]: I1206 05:42:48.368836 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl" event={"ID":"0e17be2a-d936-4d91-862a-b92014212bf6","Type":"ContainerStarted","Data":"983189648c8e1248fdc4d468d3859d443c56fa0cc60e40a5a98ac0c67c996ff5"} Dec 06 05:42:48 crc kubenswrapper[4706]: I1206 05:42:48.371266 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-tx6k9" event={"ID":"1d93b83c-6e45-44bf-b9b1-d6163c85d6b1","Type":"ContainerStarted","Data":"b0a39380d2cabedc4bfea78970093d17636c369ab927e3485ee02c7936dd4158"} Dec 06 05:42:48 crc kubenswrapper[4706]: I1206 05:42:48.372536 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-fcp7z" event={"ID":"b6524ab6-7d15-4cf4-b3b2-dc9f0d014930","Type":"ContainerStarted","Data":"956b874eb41482819486aec84e008052d56a21e273757fd1de854dd3d8a658db"} Dec 06 05:42:48 crc kubenswrapper[4706]: I1206 05:42:48.376315 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-msm2n" event={"ID":"9e547dc3-41db-48ab-b791-885c0f98f4c8","Type":"ContainerStarted","Data":"0ce5b2bb171b9737a0960a05dbf029789bf6313ec8ed46aa0e76a747e209075d"} Dec 06 05:42:48 crc kubenswrapper[4706]: I1206 05:42:48.380379 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4" event={"ID":"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f","Type":"ContainerStarted","Data":"d8950646da8a5d8f5c87afcf37877c831e42973568d310a20cb7e8356ccd5ea0"} Dec 06 05:42:48 crc kubenswrapper[4706]: I1206 05:42:48.408009 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-jspvh" event={"ID":"eacc98a4-22bf-4a38-8de0-2bf6fd395572","Type":"ContainerStarted","Data":"20c84b8683b5bd395b6581b636160252340f09372626ab6f9ff7dd0716663236"} Dec 06 05:42:48 crc kubenswrapper[4706]: I1206 05:42:48.420267 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-qr75r" event={"ID":"646d8bbb-f505-42f9-a23d-15b999c5acce","Type":"ContainerStarted","Data":"d6d5479c1558f8622d6090c6324faa9d3b617571ebf6ea381d2a817e1e47b7c4"} Dec 06 05:42:48 crc kubenswrapper[4706]: I1206 05:42:48.448311 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7c79b5df47-xctf2" event={"ID":"b67589f2-8ee8-43a3-aaf9-e1767c0a75c5","Type":"ContainerStarted","Data":"00cf8250cd20dfa97d4c430a8d4d6d0fba70cf2ccb417e84fa639742cfe9e2d7"} Dec 06 05:42:48 crc kubenswrapper[4706]: I1206 05:42:48.476508 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-qmnhr" event={"ID":"ff8a3a6e-0623-417c-8e02-f16f34e3bfe9","Type":"ContainerStarted","Data":"52f48df2fbe156926dfd69940bf899ee7f9d776d7fb61621c349122f81324ff1"} Dec 06 05:42:49 crc kubenswrapper[4706]: I1206 05:42:49.484307 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-qfpfj" event={"ID":"0928e1f4-7912-465f-a991-9d0dda0a42d1","Type":"ContainerStarted","Data":"a48fb9ef849351f0f41c62e5c171c45c46ba8a90888302e7b0917ea8ddcb9d75"} Dec 06 05:42:49 crc kubenswrapper[4706]: I1206 05:42:49.486076 4706 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-k5hqn" event={"ID":"b980759b-88cf-47ee-b7b0-12ebaddba6cd","Type":"ContainerStarted","Data":"47302ef03464eb2dd5d59a4404bbd9df71d7b3370fe6bda7ec47d9fc63f14cf3"} Dec 06 05:42:49 crc kubenswrapper[4706]: I1206 05:42:49.487444 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-wzlpz" event={"ID":"74049eb3-6721-4234-80cd-01b530d2d9e5","Type":"ContainerStarted","Data":"3aabaf3a2e931991452d97d95690e38c777f7854ad78314bfd4b5151e05c5fcd"} Dec 06 05:42:49 crc kubenswrapper[4706]: I1206 05:42:49.488978 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-jc6r8" event={"ID":"73d3329e-7a93-4d32-b7ba-0d5d6b468432","Type":"ContainerStarted","Data":"532e6fdf97739cb449533272b59ca874eb2a6f752da1050795786aedbe53bda7"} Dec 06 05:42:50 crc kubenswrapper[4706]: I1206 05:42:50.523497 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4" event={"ID":"36973f56-f6d5-4a12-b86e-4ad7bcb3df6f","Type":"ContainerStarted","Data":"f655f5425b0bbb2b2139b3e1b85fbbe0dd59d7e93df012e8bc1e2912ce7fa769"} Dec 06 05:42:50 crc kubenswrapper[4706]: I1206 05:42:50.523919 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4" Dec 06 05:42:50 crc kubenswrapper[4706]: I1206 05:42:50.545487 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4" podStartSLOduration=49.545471607 podStartE2EDuration="49.545471607s" podCreationTimestamp="2025-12-06 05:42:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:42:50.544467559 +0000 UTC m=+1392.872291503" watchObservedRunningTime="2025-12-06 05:42:50.545471607 +0000 UTC m=+1392.873295561" Dec 06 05:42:51 crc kubenswrapper[4706]: E1206 05:42:51.798745 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-78f8948974-gz7v6" podUID="47a5741f-61c5-4de3-b020-50c25f0570f2" Dec 06 05:42:51 crc kubenswrapper[4706]: E1206 05:42:51.973144 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-77987cd8cd-vm2sj" podUID="de139c22-08fa-4b45-abda-af9394c16eac" Dec 06 05:42:51 crc kubenswrapper[4706]: E1206 05:42:51.975416 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-nhzq9" podUID="5f25d928-9f7a-4d1b-b1bb-abc58dad2080" Dec 06 05:42:52 crc kubenswrapper[4706]: E1206 05:42:52.042313 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-jvwv2" podUID="34163fc1-16c7-4942-9eda-5afb77180d00" Dec 06 05:42:52 crc kubenswrapper[4706]: E1206 05:42:52.213466 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-bkvhv" podUID="d28af7d8-b64b-48f1-9ac1-7f1cfc361751" Dec 06 05:42:52 crc kubenswrapper[4706]: E1206 05:42:52.303868 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-q9dk8" podUID="9914167a-34c0-42fc-ac0c-af6f866b437f" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.540948 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" event={"ID":"09479c44-e706-4f72-a1f3-6b71d4b29f0b","Type":"ContainerStarted","Data":"d238b0512d919af056669580eadfac6046b36329afdafcad06550dbe42653873"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.540990 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" event={"ID":"09479c44-e706-4f72-a1f3-6b71d4b29f0b","Type":"ContainerStarted","Data":"7ce4e8a68c60f2aeb2cf7f3cee055f83a013c2dc283d48c8e525870551ae8e7e"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.541762 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.543533 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl" event={"ID":"0e17be2a-d936-4d91-862a-b92014212bf6","Type":"ContainerStarted","Data":"f148d346f909eefc7b1603403e97e70e556216b9858de805dee1640ff15a25be"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.543564 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl" event={"ID":"0e17be2a-d936-4d91-862a-b92014212bf6","Type":"ContainerStarted","Data":"c6f693ac927a2b1537141f17294c8f91df72b4ba893bee4d4b96e4f7cc0e32d7"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.543949 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.545280 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-jvwv2" event={"ID":"34163fc1-16c7-4942-9eda-5afb77180d00","Type":"ContainerStarted","Data":"c4d5e7eee085c161c569fdc6b1a2f3ac166cb3c60e773be3f259cda22447d64a"} Dec 06 05:42:52 crc kubenswrapper[4706]: E1206 05:42:52.546321 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:72ad6517987f674af0d0ae092cbb874aeae909c8b8b60188099c311762ebc8f7\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-jvwv2" 
podUID="34163fc1-16c7-4942-9eda-5afb77180d00" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.548309 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-jspvh" event={"ID":"eacc98a4-22bf-4a38-8de0-2bf6fd395572","Type":"ContainerStarted","Data":"7e3e1d7779485053e61ce0a5419218e9c37ced9a5c4235c74c9c782dc21ad5b9"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.548844 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-jspvh" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.550489 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-lpjp5" event={"ID":"bfd8649f-6345-40be-9193-e80b2ce0c1dc","Type":"ContainerStarted","Data":"cf84a794cb83026fdb53f48111cf72a53f09d8398bb5453b1344547f750930a3"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.550823 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-lpjp5" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.552118 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-bkvhv" event={"ID":"d28af7d8-b64b-48f1-9ac1-7f1cfc361751","Type":"ContainerStarted","Data":"5dae2901a8b5199bf93a33ab32284c31c028968f2f0627ae29795fe75cb9231a"} Dec 06 05:42:52 crc kubenswrapper[4706]: E1206 05:42:52.553146 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:0b3fb69f35c151895d3dffd514974a9f9fe1c77c3bca69b78b81efb183cf4557\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-bkvhv" podUID="d28af7d8-b64b-48f1-9ac1-7f1cfc361751" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.554081 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-wzlpz" event={"ID":"74049eb3-6721-4234-80cd-01b530d2d9e5","Type":"ContainerStarted","Data":"d8cb02f89a7cc2fc3db77fa18c7692682d2b1a18cee3ea3d573849762ce25e5e"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.554454 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-wzlpz" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.555817 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-jc6r8" event={"ID":"73d3329e-7a93-4d32-b7ba-0d5d6b468432","Type":"ContainerStarted","Data":"7f0ea5de07b5d460a9b8005d2b276741cb7ea390a74ac0d8a035dbd61b1b69be"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.556201 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-jc6r8" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.558200 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-gz7v6" event={"ID":"47a5741f-61c5-4de3-b020-50c25f0570f2","Type":"ContainerStarted","Data":"f171a85bae178d198d90759503b15aad4edf5904cb83633e72c4450c2f4039b7"} Dec 06 05:42:52 crc kubenswrapper[4706]: E1206 05:42:52.559271 4706 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f\\\"\"" pod="openstack-operators/placement-operator-controller-manager-78f8948974-gz7v6" podUID="47a5741f-61c5-4de3-b020-50c25f0570f2" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.560121 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-qfpfj" event={"ID":"0928e1f4-7912-465f-a991-9d0dda0a42d1","Type":"ContainerStarted","Data":"4f319b699bdef92dbb2d230d81a382bf30d0b0a8a01638f4156160d3787d494d"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.560512 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-qfpfj" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.562746 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-qr75r" event={"ID":"646d8bbb-f505-42f9-a23d-15b999c5acce","Type":"ContainerStarted","Data":"7263c76109394bce2292b6f3738cfed6854f5d44be2507aec1edebdede0d3987"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.562885 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-qr75r" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.564227 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-q9dk8" event={"ID":"9914167a-34c0-42fc-ac0c-af6f866b437f","Type":"ContainerStarted","Data":"697e151a0e390e9d427e5e06e30a04652d68afa73aa77df4f1d3fbefa4150036"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.566066 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7c79b5df47-xctf2" event={"ID":"b67589f2-8ee8-43a3-aaf9-e1767c0a75c5","Type":"ContainerStarted","Data":"e4420d7758742d2472b0d6880134e0fd3a6e0ffb84c99c8dadabaad5b799f139"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.566175 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-7c79b5df47-xctf2" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.567779 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-mpkjv" event={"ID":"31b78248-5727-4a30-95ab-d75acc5a752b","Type":"ContainerStarted","Data":"ef1c734e28f3b744a659f9aefcac77ec6c13ad5b1070209232925add0700e241"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.568243 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-mpkjv" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.571835 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-5854674fcc-qmnhr" event={"ID":"ff8a3a6e-0623-417c-8e02-f16f34e3bfe9","Type":"ContainerStarted","Data":"83e1c8a95d17db3714b425f2665f598dc79b5139fcdd7e8d8fc829cb6d440cb1"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.572089 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-5854674fcc-qmnhr" Dec 06 05:42:52 crc kubenswrapper[4706]: 
I1206 05:42:52.573924 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-77987cd8cd-vm2sj" event={"ID":"de139c22-08fa-4b45-abda-af9394c16eac","Type":"ContainerStarted","Data":"32d44105ab65d49589a2d4f8b161a2204c70ab2ae9213cda74f5f65006c23fc5"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.575549 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-msm2n" event={"ID":"9e547dc3-41db-48ab-b791-885c0f98f4c8","Type":"ContainerStarted","Data":"b639e867f9972daeba83e01d9f0ec18395dea4473c1d4f5ce265d1659971ae34"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.576382 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-msm2n" Dec 06 05:42:52 crc kubenswrapper[4706]: E1206 05:42:52.576514 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:abdb733b01e92ac17f565762f30f1d075b44c16421bd06e557f6bb3c319e1809\\\"\"" pod="openstack-operators/glance-operator-controller-manager-77987cd8cd-vm2sj" podUID="de139c22-08fa-4b45-abda-af9394c16eac" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.578035 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-nhzq9" event={"ID":"5f25d928-9f7a-4d1b-b1bb-abc58dad2080","Type":"ContainerStarted","Data":"04e991efd028a439d53c54b2cb0a511812a37d682635fd8e841c24f801112555"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.579245 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-msm2n" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.580089 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-tx6k9" event={"ID":"1d93b83c-6e45-44bf-b9b1-d6163c85d6b1","Type":"ContainerStarted","Data":"35cf374ce0f8d42e43f144d4d8630259b9e9e838cacf24859fffd868b9f65936"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.580158 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-tx6k9" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.581561 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-998648c74-k5hqn" event={"ID":"b980759b-88cf-47ee-b7b0-12ebaddba6cd","Type":"ContainerStarted","Data":"1677bbc7421d073b92f184fa7a6eaf50feeb44cf65c85025f20590c6aae52a8e"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.581919 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-998648c74-k5hqn" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.583736 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-fcp7z" event={"ID":"b6524ab6-7d15-4cf4-b3b2-dc9f0d014930","Type":"ContainerStarted","Data":"0f68db9ee498de2f5f940a3aa23c6fe55c4a4f7946d64d1012f8f183685e4674"} Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.584410 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-fcp7z" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.585779 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-fcp7z" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.595361 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" podStartSLOduration=49.009456224 podStartE2EDuration="52.595346261s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:47.847328326 +0000 UTC m=+1390.175152280" lastFinishedPulling="2025-12-06 05:42:51.433218373 +0000 UTC m=+1393.761042317" observedRunningTime="2025-12-06 05:42:52.588515697 +0000 UTC m=+1394.916339651" watchObservedRunningTime="2025-12-06 05:42:52.595346261 +0000 UTC m=+1394.923170205" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.625468 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-jspvh" podStartSLOduration=3.5688023490000003 podStartE2EDuration="52.625449656s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:02.413186367 +0000 UTC m=+1344.741010311" lastFinishedPulling="2025-12-06 05:42:51.469833674 +0000 UTC m=+1393.797657618" observedRunningTime="2025-12-06 05:42:52.621805497 +0000 UTC m=+1394.949629441" watchObservedRunningTime="2025-12-06 05:42:52.625449656 +0000 UTC m=+1394.953273600" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.686152 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-wzlpz" podStartSLOduration=4.694208257 podStartE2EDuration="52.686137088s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:03.451361244 +0000 UTC m=+1345.779185188" lastFinishedPulling="2025-12-06 05:42:51.443290075 +0000 UTC m=+1393.771114019" observedRunningTime="2025-12-06 05:42:52.68400816 +0000 UTC m=+1395.011832104" watchObservedRunningTime="2025-12-06 05:42:52.686137088 +0000 UTC m=+1395.013961032" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.714970 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-lpjp5" podStartSLOduration=4.659646885 podStartE2EDuration="52.714952207s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:03.323968291 +0000 UTC m=+1345.651792235" lastFinishedPulling="2025-12-06 05:42:51.379273613 +0000 UTC m=+1393.707097557" observedRunningTime="2025-12-06 05:42:52.707669831 +0000 UTC m=+1395.035493775" watchObservedRunningTime="2025-12-06 05:42:52.714952207 +0000 UTC m=+1395.042776161" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.735240 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-998648c74-k5hqn" podStartSLOduration=4.812091623 podStartE2EDuration="52.735226055s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:03.422328687 +0000 UTC m=+1345.750152621" lastFinishedPulling="2025-12-06 05:42:51.345463119 +0000 UTC m=+1393.673287053" observedRunningTime="2025-12-06 05:42:52.728701769 +0000 UTC m=+1395.056525723" 
watchObservedRunningTime="2025-12-06 05:42:52.735226055 +0000 UTC m=+1395.063049999" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.779026 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-qr75r" podStartSLOduration=3.576920009 podStartE2EDuration="52.77900519s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:02.246894238 +0000 UTC m=+1344.574718192" lastFinishedPulling="2025-12-06 05:42:51.448979429 +0000 UTC m=+1393.776803373" observedRunningTime="2025-12-06 05:42:52.778665281 +0000 UTC m=+1395.106489225" watchObservedRunningTime="2025-12-06 05:42:52.77900519 +0000 UTC m=+1395.106829134" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.799569 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-5854674fcc-qmnhr" podStartSLOduration=4.837613234 podStartE2EDuration="52.799550605s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:03.422650366 +0000 UTC m=+1345.750474300" lastFinishedPulling="2025-12-06 05:42:51.384587727 +0000 UTC m=+1393.712411671" observedRunningTime="2025-12-06 05:42:52.799134144 +0000 UTC m=+1395.126958088" watchObservedRunningTime="2025-12-06 05:42:52.799550605 +0000 UTC m=+1395.127374549" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.836801 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-7c79b5df47-xctf2" podStartSLOduration=4.791483756 podStartE2EDuration="52.836786923s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:03.421953127 +0000 UTC m=+1345.749777071" lastFinishedPulling="2025-12-06 05:42:51.467256284 +0000 UTC m=+1393.795080238" observedRunningTime="2025-12-06 05:42:52.834566233 +0000 UTC m=+1395.162390177" watchObservedRunningTime="2025-12-06 05:42:52.836786923 +0000 UTC m=+1395.164610867" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.856304 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-qfpfj" podStartSLOduration=4.913847537 podStartE2EDuration="52.856291201s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:03.447355824 +0000 UTC m=+1345.775179768" lastFinishedPulling="2025-12-06 05:42:51.389799488 +0000 UTC m=+1393.717623432" observedRunningTime="2025-12-06 05:42:52.852373035 +0000 UTC m=+1395.180196989" watchObservedRunningTime="2025-12-06 05:42:52.856291201 +0000 UTC m=+1395.184115145" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.883205 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-jc6r8" podStartSLOduration=4.9102306989999995 podStartE2EDuration="52.883193038s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:03.42277269 +0000 UTC m=+1345.750596634" lastFinishedPulling="2025-12-06 05:42:51.395735039 +0000 UTC m=+1393.723558973" observedRunningTime="2025-12-06 05:42:52.879011736 +0000 UTC m=+1395.206835680" watchObservedRunningTime="2025-12-06 05:42:52.883193038 +0000 UTC m=+1395.211016982" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.892286 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-fcp7z" podStartSLOduration=4.985987448 podStartE2EDuration="52.892267224s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:03.42169253 +0000 UTC m=+1345.749516474" lastFinishedPulling="2025-12-06 05:42:51.327972306 +0000 UTC m=+1393.655796250" observedRunningTime="2025-12-06 05:42:52.890199928 +0000 UTC m=+1395.218023872" watchObservedRunningTime="2025-12-06 05:42:52.892267224 +0000 UTC m=+1395.220091168" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.941450 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl" podStartSLOduration=49.455591803 podStartE2EDuration="52.941432504s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:47.847410538 +0000 UTC m=+1390.175234482" lastFinishedPulling="2025-12-06 05:42:51.333251239 +0000 UTC m=+1393.661075183" observedRunningTime="2025-12-06 05:42:52.936176742 +0000 UTC m=+1395.264000696" watchObservedRunningTime="2025-12-06 05:42:52.941432504 +0000 UTC m=+1395.269256448" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.965472 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-mpkjv" podStartSLOduration=3.715953779 podStartE2EDuration="52.965450394s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:02.215766575 +0000 UTC m=+1344.543590519" lastFinishedPulling="2025-12-06 05:42:51.46526319 +0000 UTC m=+1393.793087134" observedRunningTime="2025-12-06 05:42:52.953234583 +0000 UTC m=+1395.281058527" watchObservedRunningTime="2025-12-06 05:42:52.965450394 +0000 UTC m=+1395.293274358" Dec 06 05:42:52 crc kubenswrapper[4706]: I1206 05:42:52.983316 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-tx6k9" podStartSLOduration=4.870691924 podStartE2EDuration="52.983298807s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:03.347916578 +0000 UTC m=+1345.675740522" lastFinishedPulling="2025-12-06 05:42:51.460523461 +0000 UTC m=+1393.788347405" observedRunningTime="2025-12-06 05:42:52.977939242 +0000 UTC m=+1395.305763186" watchObservedRunningTime="2025-12-06 05:42:52.983298807 +0000 UTC m=+1395.311122751" Dec 06 05:42:53 crc kubenswrapper[4706]: I1206 05:42:53.005660 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-msm2n" podStartSLOduration=4.074253767 podStartE2EDuration="53.005637201s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:02.396326125 +0000 UTC m=+1344.724150069" lastFinishedPulling="2025-12-06 05:42:51.327709549 +0000 UTC m=+1393.655533503" observedRunningTime="2025-12-06 05:42:53.004693856 +0000 UTC m=+1395.332517800" watchObservedRunningTime="2025-12-06 05:42:53.005637201 +0000 UTC m=+1395.333461155" Dec 06 05:42:53 crc kubenswrapper[4706]: I1206 05:42:53.592771 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-nhzq9" event={"ID":"5f25d928-9f7a-4d1b-b1bb-abc58dad2080","Type":"ContainerStarted","Data":"584d4c3283e95ff90e2cf88582799fc53cb1d5e23a82ece869c9a28bbbf8de97"} Dec 06 05:42:53 crc kubenswrapper[4706]: 
I1206 05:42:53.592929 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-nhzq9" Dec 06 05:42:53 crc kubenswrapper[4706]: I1206 05:42:53.594494 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-q9dk8" event={"ID":"9914167a-34c0-42fc-ac0c-af6f866b437f","Type":"ContainerStarted","Data":"756c8b0ff01f6fd09a7e89c66fe4933ee657ede92c7299385c76dba724cf9672"} Dec 06 05:42:53 crc kubenswrapper[4706]: E1206 05:42:53.598799 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:abdb733b01e92ac17f565762f30f1d075b44c16421bd06e557f6bb3c319e1809\\\"\"" pod="openstack-operators/glance-operator-controller-manager-77987cd8cd-vm2sj" podUID="de139c22-08fa-4b45-abda-af9394c16eac" Dec 06 05:42:53 crc kubenswrapper[4706]: I1206 05:42:53.599158 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-jc6r8" Dec 06 05:42:53 crc kubenswrapper[4706]: I1206 05:42:53.599210 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-mpkjv" Dec 06 05:42:53 crc kubenswrapper[4706]: I1206 05:42:53.599237 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-5854674fcc-qmnhr" Dec 06 05:42:53 crc kubenswrapper[4706]: I1206 05:42:53.599263 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-769dc69bc-tx6k9" Dec 06 05:42:53 crc kubenswrapper[4706]: I1206 05:42:53.599766 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-7c79b5df47-xctf2" Dec 06 05:42:53 crc kubenswrapper[4706]: I1206 05:42:53.599804 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-697bc559fc-qfpfj" Dec 06 05:42:53 crc kubenswrapper[4706]: I1206 05:42:53.599859 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-wzlpz" Dec 06 05:42:53 crc kubenswrapper[4706]: E1206 05:42:53.599868 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:72ad6517987f674af0d0ae092cbb874aeae909c8b8b60188099c311762ebc8f7\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-jvwv2" podUID="34163fc1-16c7-4942-9eda-5afb77180d00" Dec 06 05:42:53 crc kubenswrapper[4706]: I1206 05:42:53.604035 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-jspvh" Dec 06 05:42:53 crc kubenswrapper[4706]: I1206 05:42:53.604097 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-lpjp5" Dec 06 05:42:53 crc kubenswrapper[4706]: I1206 05:42:53.604141 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-qr75r" Dec 06 05:42:53 crc kubenswrapper[4706]: I1206 05:42:53.628766 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-nhzq9" podStartSLOduration=3.978892153 podStartE2EDuration="53.628745507s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:03.421279379 +0000 UTC m=+1345.749103323" lastFinishedPulling="2025-12-06 05:42:53.071132733 +0000 UTC m=+1395.398956677" observedRunningTime="2025-12-06 05:42:53.624065151 +0000 UTC m=+1395.951889095" watchObservedRunningTime="2025-12-06 05:42:53.628745507 +0000 UTC m=+1395.956569451" Dec 06 05:42:53 crc kubenswrapper[4706]: I1206 05:42:53.862635 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-q9dk8" podStartSLOduration=4.213215816 podStartE2EDuration="53.862619955s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:03.349401988 +0000 UTC m=+1345.677225932" lastFinishedPulling="2025-12-06 05:42:52.998806127 +0000 UTC m=+1395.326630071" observedRunningTime="2025-12-06 05:42:53.858898254 +0000 UTC m=+1396.186722198" watchObservedRunningTime="2025-12-06 05:42:53.862619955 +0000 UTC m=+1396.190443899" Dec 06 05:42:54 crc kubenswrapper[4706]: I1206 05:42:54.603700 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-gz7v6" event={"ID":"47a5741f-61c5-4de3-b020-50c25f0570f2","Type":"ContainerStarted","Data":"641152234f87cd6c0c5f2f08a7f8f1a8eb821a5f0ed7aeb4fb88ccb167cbf33c"} Dec 06 05:42:54 crc kubenswrapper[4706]: I1206 05:42:54.604791 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-78f8948974-gz7v6" Dec 06 05:42:54 crc kubenswrapper[4706]: I1206 05:42:54.607469 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-bkvhv" event={"ID":"d28af7d8-b64b-48f1-9ac1-7f1cfc361751","Type":"ContainerStarted","Data":"1749d1b85d010bcdef49315af157a360f8799d495c6d2f61f7c048d68b9871fb"} Dec 06 05:42:54 crc kubenswrapper[4706]: I1206 05:42:54.607922 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-bkvhv" Dec 06 05:42:54 crc kubenswrapper[4706]: I1206 05:42:54.611000 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-q9dk8" Dec 06 05:42:54 crc kubenswrapper[4706]: I1206 05:42:54.616560 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-998648c74-k5hqn" Dec 06 05:42:54 crc kubenswrapper[4706]: I1206 05:42:54.629123 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-78f8948974-gz7v6" podStartSLOduration=3.927336848 podStartE2EDuration="54.629101989s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:03.393748134 +0000 UTC m=+1345.721572078" lastFinishedPulling="2025-12-06 05:42:54.095513275 +0000 UTC m=+1396.423337219" observedRunningTime="2025-12-06 05:42:54.62652476 +0000 UTC m=+1396.954348704" watchObservedRunningTime="2025-12-06 
05:42:54.629101989 +0000 UTC m=+1396.956925973" Dec 06 05:42:54 crc kubenswrapper[4706]: I1206 05:42:54.647128 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-bkvhv" podStartSLOduration=3.874332569 podStartE2EDuration="54.647103477s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:03.348705269 +0000 UTC m=+1345.676529213" lastFinishedPulling="2025-12-06 05:42:54.121476187 +0000 UTC m=+1396.449300121" observedRunningTime="2025-12-06 05:42:54.643333435 +0000 UTC m=+1396.971157379" watchObservedRunningTime="2025-12-06 05:42:54.647103477 +0000 UTC m=+1396.974927451" Dec 06 05:42:56 crc kubenswrapper[4706]: I1206 05:42:56.778883 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-x7wwl" Dec 06 05:42:57 crc kubenswrapper[4706]: I1206 05:42:57.627013 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-pmfhv" event={"ID":"2d6df005-5a24-47f7-a1a2-a30e6b8ab9fb","Type":"ContainerStarted","Data":"a6efaa1f5962a1ea6d2ed715d4dea47e393e4d1217af2253274b5922876e6cb4"} Dec 06 05:42:57 crc kubenswrapper[4706]: I1206 05:42:57.648881 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-pmfhv" podStartSLOduration=3.467059403 podStartE2EDuration="56.648862231s" podCreationTimestamp="2025-12-06 05:42:01 +0000 UTC" firstStartedPulling="2025-12-06 05:42:03.34910709 +0000 UTC m=+1345.676931034" lastFinishedPulling="2025-12-06 05:42:56.530909918 +0000 UTC m=+1398.858733862" observedRunningTime="2025-12-06 05:42:57.6454976 +0000 UTC m=+1399.973321634" watchObservedRunningTime="2025-12-06 05:42:57.648862231 +0000 UTC m=+1399.976686185" Dec 06 05:43:01 crc kubenswrapper[4706]: I1206 05:43:01.020952 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-bkvhv" Dec 06 05:43:01 crc kubenswrapper[4706]: I1206 05:43:01.105481 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-nhzq9" Dec 06 05:43:01 crc kubenswrapper[4706]: I1206 05:43:01.266648 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-b6456fdb6-q9dk8" Dec 06 05:43:01 crc kubenswrapper[4706]: I1206 05:43:01.292690 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-78f8948974-gz7v6" Dec 06 05:43:03 crc kubenswrapper[4706]: I1206 05:43:03.525505 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-7f6f47b7b7-lmnn4" Dec 06 05:43:05 crc kubenswrapper[4706]: I1206 05:43:05.689993 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-jvwv2" event={"ID":"34163fc1-16c7-4942-9eda-5afb77180d00","Type":"ContainerStarted","Data":"55ed5cbb56742df4d919ac11a1ca7d7ee35e8d2967486e72259e508033e1f74f"} Dec 06 05:43:05 crc kubenswrapper[4706]: I1206 05:43:05.691592 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-jvwv2" Dec 06 05:43:05 crc kubenswrapper[4706]: I1206 05:43:05.712149 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-jvwv2" podStartSLOduration=3.1723632840000002 podStartE2EDuration="1m5.712131744s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:02.247460323 +0000 UTC m=+1344.575284267" lastFinishedPulling="2025-12-06 05:43:04.787228743 +0000 UTC m=+1407.115052727" observedRunningTime="2025-12-06 05:43:05.707162439 +0000 UTC m=+1408.034986403" watchObservedRunningTime="2025-12-06 05:43:05.712131744 +0000 UTC m=+1408.039955708" Dec 06 05:43:06 crc kubenswrapper[4706]: I1206 05:43:06.841831 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-64bc77cfd455bk5" Dec 06 05:43:10 crc kubenswrapper[4706]: I1206 05:43:10.723974 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-77987cd8cd-vm2sj" event={"ID":"de139c22-08fa-4b45-abda-af9394c16eac","Type":"ContainerStarted","Data":"ddcc60c30b6ef81188b9137bbb3faa5cea074ef0c648ff941d493ae7f566193b"} Dec 06 05:43:10 crc kubenswrapper[4706]: I1206 05:43:10.725799 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-77987cd8cd-vm2sj" Dec 06 05:43:10 crc kubenswrapper[4706]: I1206 05:43:10.744291 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-77987cd8cd-vm2sj" podStartSLOduration=3.936524108 podStartE2EDuration="1m10.744272456s" podCreationTimestamp="2025-12-06 05:42:00 +0000 UTC" firstStartedPulling="2025-12-06 05:42:03.310124611 +0000 UTC m=+1345.637948555" lastFinishedPulling="2025-12-06 05:43:10.117872959 +0000 UTC m=+1412.445696903" observedRunningTime="2025-12-06 05:43:10.741636324 +0000 UTC m=+1413.069460278" watchObservedRunningTime="2025-12-06 05:43:10.744272456 +0000 UTC m=+1413.072096400" Dec 06 05:43:10 crc kubenswrapper[4706]: I1206 05:43:10.907579 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7765d96ddf-jvwv2" Dec 06 05:43:21 crc kubenswrapper[4706]: I1206 05:43:21.090231 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-77987cd8cd-vm2sj" Dec 06 05:43:35 crc kubenswrapper[4706]: I1206 05:43:35.948418 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-v4r7w"] Dec 06 05:43:35 crc kubenswrapper[4706]: I1206 05:43:35.952450 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-v4r7w" Dec 06 05:43:35 crc kubenswrapper[4706]: I1206 05:43:35.955310 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-rsphz" Dec 06 05:43:35 crc kubenswrapper[4706]: I1206 05:43:35.955722 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Dec 06 05:43:35 crc kubenswrapper[4706]: I1206 05:43:35.955738 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Dec 06 05:43:35 crc kubenswrapper[4706]: I1206 05:43:35.958414 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Dec 06 05:43:35 crc kubenswrapper[4706]: I1206 05:43:35.960827 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:43:35 crc kubenswrapper[4706]: I1206 05:43:35.960866 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:43:35 crc kubenswrapper[4706]: I1206 05:43:35.965286 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-v4r7w"] Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.047259 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a845f2bd-03a5-491b-bb1f-59e6fa2a0136-config\") pod \"dnsmasq-dns-675f4bcbfc-v4r7w\" (UID: \"a845f2bd-03a5-491b-bb1f-59e6fa2a0136\") " pod="openstack/dnsmasq-dns-675f4bcbfc-v4r7w" Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.047370 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d5dmg\" (UniqueName: \"kubernetes.io/projected/a845f2bd-03a5-491b-bb1f-59e6fa2a0136-kube-api-access-d5dmg\") pod \"dnsmasq-dns-675f4bcbfc-v4r7w\" (UID: \"a845f2bd-03a5-491b-bb1f-59e6fa2a0136\") " pod="openstack/dnsmasq-dns-675f4bcbfc-v4r7w" Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.061401 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-d7qpg"] Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.071942 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-d7qpg" Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.073784 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-d7qpg"] Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.076337 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.148415 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfcf47c4-2af2-4393-9b3f-ed4f82f62f18-config\") pod \"dnsmasq-dns-78dd6ddcc-d7qpg\" (UID: \"bfcf47c4-2af2-4393-9b3f-ed4f82f62f18\") " pod="openstack/dnsmasq-dns-78dd6ddcc-d7qpg" Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.148608 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bfcf47c4-2af2-4393-9b3f-ed4f82f62f18-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-d7qpg\" (UID: \"bfcf47c4-2af2-4393-9b3f-ed4f82f62f18\") " pod="openstack/dnsmasq-dns-78dd6ddcc-d7qpg" Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.148677 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a845f2bd-03a5-491b-bb1f-59e6fa2a0136-config\") pod \"dnsmasq-dns-675f4bcbfc-v4r7w\" (UID: \"a845f2bd-03a5-491b-bb1f-59e6fa2a0136\") " pod="openstack/dnsmasq-dns-675f4bcbfc-v4r7w" Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.148759 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d5dmg\" (UniqueName: \"kubernetes.io/projected/a845f2bd-03a5-491b-bb1f-59e6fa2a0136-kube-api-access-d5dmg\") pod \"dnsmasq-dns-675f4bcbfc-v4r7w\" (UID: \"a845f2bd-03a5-491b-bb1f-59e6fa2a0136\") " pod="openstack/dnsmasq-dns-675f4bcbfc-v4r7w" Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.148804 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knx7k\" (UniqueName: \"kubernetes.io/projected/bfcf47c4-2af2-4393-9b3f-ed4f82f62f18-kube-api-access-knx7k\") pod \"dnsmasq-dns-78dd6ddcc-d7qpg\" (UID: \"bfcf47c4-2af2-4393-9b3f-ed4f82f62f18\") " pod="openstack/dnsmasq-dns-78dd6ddcc-d7qpg" Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.149695 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a845f2bd-03a5-491b-bb1f-59e6fa2a0136-config\") pod \"dnsmasq-dns-675f4bcbfc-v4r7w\" (UID: \"a845f2bd-03a5-491b-bb1f-59e6fa2a0136\") " pod="openstack/dnsmasq-dns-675f4bcbfc-v4r7w" Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.170607 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d5dmg\" (UniqueName: \"kubernetes.io/projected/a845f2bd-03a5-491b-bb1f-59e6fa2a0136-kube-api-access-d5dmg\") pod \"dnsmasq-dns-675f4bcbfc-v4r7w\" (UID: \"a845f2bd-03a5-491b-bb1f-59e6fa2a0136\") " pod="openstack/dnsmasq-dns-675f4bcbfc-v4r7w" Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.250484 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfcf47c4-2af2-4393-9b3f-ed4f82f62f18-config\") pod \"dnsmasq-dns-78dd6ddcc-d7qpg\" (UID: \"bfcf47c4-2af2-4393-9b3f-ed4f82f62f18\") " pod="openstack/dnsmasq-dns-78dd6ddcc-d7qpg" Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 
05:43:36.250947 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bfcf47c4-2af2-4393-9b3f-ed4f82f62f18-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-d7qpg\" (UID: \"bfcf47c4-2af2-4393-9b3f-ed4f82f62f18\") " pod="openstack/dnsmasq-dns-78dd6ddcc-d7qpg" Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.251139 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knx7k\" (UniqueName: \"kubernetes.io/projected/bfcf47c4-2af2-4393-9b3f-ed4f82f62f18-kube-api-access-knx7k\") pod \"dnsmasq-dns-78dd6ddcc-d7qpg\" (UID: \"bfcf47c4-2af2-4393-9b3f-ed4f82f62f18\") " pod="openstack/dnsmasq-dns-78dd6ddcc-d7qpg" Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.251372 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfcf47c4-2af2-4393-9b3f-ed4f82f62f18-config\") pod \"dnsmasq-dns-78dd6ddcc-d7qpg\" (UID: \"bfcf47c4-2af2-4393-9b3f-ed4f82f62f18\") " pod="openstack/dnsmasq-dns-78dd6ddcc-d7qpg" Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.251828 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bfcf47c4-2af2-4393-9b3f-ed4f82f62f18-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-d7qpg\" (UID: \"bfcf47c4-2af2-4393-9b3f-ed4f82f62f18\") " pod="openstack/dnsmasq-dns-78dd6ddcc-d7qpg" Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.268886 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knx7k\" (UniqueName: \"kubernetes.io/projected/bfcf47c4-2af2-4393-9b3f-ed4f82f62f18-kube-api-access-knx7k\") pod \"dnsmasq-dns-78dd6ddcc-d7qpg\" (UID: \"bfcf47c4-2af2-4393-9b3f-ed4f82f62f18\") " pod="openstack/dnsmasq-dns-78dd6ddcc-d7qpg" Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.277695 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-v4r7w" Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.389359 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-d7qpg" Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.756639 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-v4r7w"] Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.812297 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-d7qpg"] Dec 06 05:43:36 crc kubenswrapper[4706]: W1206 05:43:36.817882 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbfcf47c4_2af2_4393_9b3f_ed4f82f62f18.slice/crio-db6e5261807e7881d3cbce118125c9bba5e9ace7d40bb23f930cbea1418ca024 WatchSource:0}: Error finding container db6e5261807e7881d3cbce118125c9bba5e9ace7d40bb23f930cbea1418ca024: Status 404 returned error can't find the container with id db6e5261807e7881d3cbce118125c9bba5e9ace7d40bb23f930cbea1418ca024 Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.924908 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-d7qpg" event={"ID":"bfcf47c4-2af2-4393-9b3f-ed4f82f62f18","Type":"ContainerStarted","Data":"db6e5261807e7881d3cbce118125c9bba5e9ace7d40bb23f930cbea1418ca024"} Dec 06 05:43:36 crc kubenswrapper[4706]: I1206 05:43:36.926177 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-v4r7w" event={"ID":"a845f2bd-03a5-491b-bb1f-59e6fa2a0136","Type":"ContainerStarted","Data":"192c4a7a658dc5cc733e27de110904060ed9c265cbd989a9e0fb41aaef60664b"} Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.038790 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-v4r7w"] Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.053600 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-kr6cp"] Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.057145 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.066340 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-kr6cp"] Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.192867 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e-dns-svc\") pod \"dnsmasq-dns-666b6646f7-kr6cp\" (UID: \"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e\") " pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.193323 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e-config\") pod \"dnsmasq-dns-666b6646f7-kr6cp\" (UID: \"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e\") " pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.193364 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5xp4\" (UniqueName: \"kubernetes.io/projected/8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e-kube-api-access-p5xp4\") pod \"dnsmasq-dns-666b6646f7-kr6cp\" (UID: \"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e\") " pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.278485 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-d7qpg"] Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.295067 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5xp4\" (UniqueName: \"kubernetes.io/projected/8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e-kube-api-access-p5xp4\") pod \"dnsmasq-dns-666b6646f7-kr6cp\" (UID: \"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e\") " pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.295184 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e-dns-svc\") pod \"dnsmasq-dns-666b6646f7-kr6cp\" (UID: \"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e\") " pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.295254 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e-config\") pod \"dnsmasq-dns-666b6646f7-kr6cp\" (UID: \"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e\") " pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.296074 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e-dns-svc\") pod \"dnsmasq-dns-666b6646f7-kr6cp\" (UID: \"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e\") " pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.296263 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e-config\") pod \"dnsmasq-dns-666b6646f7-kr6cp\" (UID: \"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e\") " pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.311742 
4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-xfjn8"] Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.313621 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.322265 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-xfjn8"] Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.337342 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5xp4\" (UniqueName: \"kubernetes.io/projected/8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e-kube-api-access-p5xp4\") pod \"dnsmasq-dns-666b6646f7-kr6cp\" (UID: \"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e\") " pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.378184 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.398954 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7604eb9-df25-4027-bef0-567366a35e27-config\") pod \"dnsmasq-dns-57d769cc4f-xfjn8\" (UID: \"e7604eb9-df25-4027-bef0-567366a35e27\") " pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.399108 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e7604eb9-df25-4027-bef0-567366a35e27-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-xfjn8\" (UID: \"e7604eb9-df25-4027-bef0-567366a35e27\") " pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.399168 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wq5b9\" (UniqueName: \"kubernetes.io/projected/e7604eb9-df25-4027-bef0-567366a35e27-kube-api-access-wq5b9\") pod \"dnsmasq-dns-57d769cc4f-xfjn8\" (UID: \"e7604eb9-df25-4027-bef0-567366a35e27\") " pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.503078 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7604eb9-df25-4027-bef0-567366a35e27-config\") pod \"dnsmasq-dns-57d769cc4f-xfjn8\" (UID: \"e7604eb9-df25-4027-bef0-567366a35e27\") " pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.503129 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e7604eb9-df25-4027-bef0-567366a35e27-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-xfjn8\" (UID: \"e7604eb9-df25-4027-bef0-567366a35e27\") " pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.503201 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wq5b9\" (UniqueName: \"kubernetes.io/projected/e7604eb9-df25-4027-bef0-567366a35e27-kube-api-access-wq5b9\") pod \"dnsmasq-dns-57d769cc4f-xfjn8\" (UID: \"e7604eb9-df25-4027-bef0-567366a35e27\") " pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.504006 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/e7604eb9-df25-4027-bef0-567366a35e27-config\") pod \"dnsmasq-dns-57d769cc4f-xfjn8\" (UID: \"e7604eb9-df25-4027-bef0-567366a35e27\") " pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.504303 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e7604eb9-df25-4027-bef0-567366a35e27-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-xfjn8\" (UID: \"e7604eb9-df25-4027-bef0-567366a35e27\") " pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.552955 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wq5b9\" (UniqueName: \"kubernetes.io/projected/e7604eb9-df25-4027-bef0-567366a35e27-kube-api-access-wq5b9\") pod \"dnsmasq-dns-57d769cc4f-xfjn8\" (UID: \"e7604eb9-df25-4027-bef0-567366a35e27\") " pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.664172 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.727041 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-kr6cp"] Dec 06 05:43:39 crc kubenswrapper[4706]: W1206 05:43:39.741270 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8ddf1b75_2cc8_42d5_bcb7_d6c95e6b102e.slice/crio-3967a05dc7986677ff5d56b7c7e55de30a8a7d6ba99878445188175de1b4df39 WatchSource:0}: Error finding container 3967a05dc7986677ff5d56b7c7e55de30a8a7d6ba99878445188175de1b4df39: Status 404 returned error can't find the container with id 3967a05dc7986677ff5d56b7c7e55de30a8a7d6ba99878445188175de1b4df39 Dec 06 05:43:39 crc kubenswrapper[4706]: I1206 05:43:39.990930 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" event={"ID":"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e","Type":"ContainerStarted","Data":"3967a05dc7986677ff5d56b7c7e55de30a8a7d6ba99878445188175de1b4df39"} Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.165310 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-xfjn8"] Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.170906 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.172363 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.178265 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.181516 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.182143 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.182288 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.182460 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-l65q5" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.182503 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.182534 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.189498 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.344149 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.344222 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.344245 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.344265 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-config-data\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.344295 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-server-conf\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.344351 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.344382 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.344417 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.344435 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94qhx\" (UniqueName: \"kubernetes.io/projected/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-kube-api-access-94qhx\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.344457 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.344471 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.445498 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.445757 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.446134 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.446294 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.446401 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-config-data\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.446549 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-server-conf\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.446771 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.446875 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.446990 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.447171 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.447293 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94qhx\" (UniqueName: \"kubernetes.io/projected/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-kube-api-access-94qhx\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.447422 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.447534 4706 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.447458 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-config-data\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 
05:43:40.447536 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.447871 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.448039 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.450330 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.457439 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.457490 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.457631 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.457648 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.457822 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-gnvvx" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.458364 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.458844 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.459380 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.459772 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.460121 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Dec 06 05:43:40 crc kubenswrapper[4706]: 
I1206 05:43:40.461498 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.469965 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-94qhx\" (UniqueName: \"kubernetes.io/projected/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-kube-api-access-94qhx\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.470542 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.476893 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-server-conf\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.498954 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.549110 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.549150 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.549173 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.549190 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-57w24\" (UniqueName: \"kubernetes.io/projected/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-kube-api-access-57w24\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.549296 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 
05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.549357 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.549449 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.549501 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.549587 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.549619 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.549691 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.651337 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.651447 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.651474 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc 
kubenswrapper[4706]: I1206 05:43:40.651504 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.651528 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-57w24\" (UniqueName: \"kubernetes.io/projected/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-kube-api-access-57w24\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.651557 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.651589 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.651626 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.651678 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.651728 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.651754 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.652103 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.652405 4706 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" 
(UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.654798 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.654872 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.656544 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.657229 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.658622 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.659403 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.660358 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.661611 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.675595 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-57w24\" (UniqueName: \"kubernetes.io/projected/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-kube-api-access-57w24\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " 
pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.679348 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.797166 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 06 05:43:40 crc kubenswrapper[4706]: I1206 05:43:40.846379 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.006256 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" event={"ID":"e7604eb9-df25-4027-bef0-567366a35e27","Type":"ContainerStarted","Data":"7b49b7b73d4264c52e71165e44d38a138cb57cd8d91a254fa3cced75852b72a1"} Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.612339 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.633604 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.633734 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.635901 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-mdz9v" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.638536 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.638846 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.638992 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.659364 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.774066 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/74e1bb57-a746-472b-a3b1-ffb875c658e4-operator-scripts\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.774123 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/74e1bb57-a746-472b-a3b1-ffb875c658e4-config-data-generated\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.774151 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/74e1bb57-a746-472b-a3b1-ffb875c658e4-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " 
pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.774176 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/74e1bb57-a746-472b-a3b1-ffb875c658e4-config-data-default\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.774190 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hpljz\" (UniqueName: \"kubernetes.io/projected/74e1bb57-a746-472b-a3b1-ffb875c658e4-kube-api-access-hpljz\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.774248 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/74e1bb57-a746-472b-a3b1-ffb875c658e4-kolla-config\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.774275 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.774303 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74e1bb57-a746-472b-a3b1-ffb875c658e4-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.875242 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/74e1bb57-a746-472b-a3b1-ffb875c658e4-kolla-config\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.875320 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.875356 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74e1bb57-a746-472b-a3b1-ffb875c658e4-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.875406 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/74e1bb57-a746-472b-a3b1-ffb875c658e4-config-data-generated\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.875423 4706 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/74e1bb57-a746-472b-a3b1-ffb875c658e4-operator-scripts\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.875445 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/74e1bb57-a746-472b-a3b1-ffb875c658e4-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.875473 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/74e1bb57-a746-472b-a3b1-ffb875c658e4-config-data-default\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.875487 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hpljz\" (UniqueName: \"kubernetes.io/projected/74e1bb57-a746-472b-a3b1-ffb875c658e4-kube-api-access-hpljz\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.876304 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/74e1bb57-a746-472b-a3b1-ffb875c658e4-kolla-config\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.876481 4706 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.878269 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/74e1bb57-a746-472b-a3b1-ffb875c658e4-config-data-generated\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.878461 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/74e1bb57-a746-472b-a3b1-ffb875c658e4-operator-scripts\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.878866 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/74e1bb57-a746-472b-a3b1-ffb875c658e4-config-data-default\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.881649 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74e1bb57-a746-472b-a3b1-ffb875c658e4-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: 
\"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.890790 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/74e1bb57-a746-472b-a3b1-ffb875c658e4-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.898816 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.901268 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hpljz\" (UniqueName: \"kubernetes.io/projected/74e1bb57-a746-472b-a3b1-ffb875c658e4-kube-api-access-hpljz\") pod \"openstack-galera-0\" (UID: \"74e1bb57-a746-472b-a3b1-ffb875c658e4\") " pod="openstack/openstack-galera-0" Dec 06 05:43:41 crc kubenswrapper[4706]: I1206 05:43:41.960313 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.209112 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.211224 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.216715 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.216913 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-lbhtx" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.217144 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.218824 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.219517 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.295803 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.295891 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/08955916-6689-445e-830d-6fbfe9a2f460-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.295919 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: 
\"kubernetes.io/empty-dir/08955916-6689-445e-830d-6fbfe9a2f460-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.295950 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2m4gp\" (UniqueName: \"kubernetes.io/projected/08955916-6689-445e-830d-6fbfe9a2f460-kube-api-access-2m4gp\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.295988 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08955916-6689-445e-830d-6fbfe9a2f460-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.296117 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/08955916-6689-445e-830d-6fbfe9a2f460-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.296166 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08955916-6689-445e-830d-6fbfe9a2f460-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.296371 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/08955916-6689-445e-830d-6fbfe9a2f460-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.398192 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/08955916-6689-445e-830d-6fbfe9a2f460-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.398255 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.398371 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/08955916-6689-445e-830d-6fbfe9a2f460-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.398412 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: 
\"kubernetes.io/empty-dir/08955916-6689-445e-830d-6fbfe9a2f460-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.398447 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2m4gp\" (UniqueName: \"kubernetes.io/projected/08955916-6689-445e-830d-6fbfe9a2f460-kube-api-access-2m4gp\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.398487 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/08955916-6689-445e-830d-6fbfe9a2f460-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.398516 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/08955916-6689-445e-830d-6fbfe9a2f460-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.398535 4706 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.398540 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08955916-6689-445e-830d-6fbfe9a2f460-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.399465 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/08955916-6689-445e-830d-6fbfe9a2f460-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.399582 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/08955916-6689-445e-830d-6fbfe9a2f460-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.400003 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/08955916-6689-445e-830d-6fbfe9a2f460-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.400469 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/08955916-6689-445e-830d-6fbfe9a2f460-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.405972 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/08955916-6689-445e-830d-6fbfe9a2f460-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.424788 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/08955916-6689-445e-830d-6fbfe9a2f460-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.427455 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.427623 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2m4gp\" (UniqueName: \"kubernetes.io/projected/08955916-6689-445e-830d-6fbfe9a2f460-kube-api-access-2m4gp\") pod \"openstack-cell1-galera-0\" (UID: \"08955916-6689-445e-830d-6fbfe9a2f460\") " pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.524713 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.542018 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.543546 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.545163 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-mzxxg" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.545408 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.545577 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.548745 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.602116 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf\") " pod="openstack/memcached-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.602162 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf-kolla-config\") pod \"memcached-0\" (UID: \"6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf\") " pod="openstack/memcached-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.602181 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf-config-data\") pod \"memcached-0\" (UID: \"6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf\") " pod="openstack/memcached-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.602236 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ttpp5\" (UniqueName: \"kubernetes.io/projected/6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf-kube-api-access-ttpp5\") pod \"memcached-0\" (UID: \"6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf\") " pod="openstack/memcached-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.602282 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf\") " pod="openstack/memcached-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.703633 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf\") " pod="openstack/memcached-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.703683 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf\") " pod="openstack/memcached-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.703714 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf-kolla-config\") pod \"memcached-0\" (UID: \"6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf\") " pod="openstack/memcached-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.703734 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf-config-data\") pod \"memcached-0\" (UID: \"6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf\") " pod="openstack/memcached-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.703798 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ttpp5\" (UniqueName: \"kubernetes.io/projected/6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf-kube-api-access-ttpp5\") pod \"memcached-0\" (UID: \"6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf\") " pod="openstack/memcached-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.704740 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf-config-data\") pod \"memcached-0\" (UID: \"6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf\") " pod="openstack/memcached-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.704809 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf-kolla-config\") pod \"memcached-0\" (UID: \"6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf\") " pod="openstack/memcached-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.719757 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf-memcached-tls-certs\") pod \"memcached-0\" (UID: \"6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf\") " pod="openstack/memcached-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.721212 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf-combined-ca-bundle\") pod \"memcached-0\" (UID: \"6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf\") " pod="openstack/memcached-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.722371 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ttpp5\" (UniqueName: \"kubernetes.io/projected/6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf-kube-api-access-ttpp5\") pod \"memcached-0\" (UID: \"6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf\") " pod="openstack/memcached-0" Dec 06 05:43:43 crc kubenswrapper[4706]: I1206 05:43:43.908065 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Dec 06 05:43:45 crc kubenswrapper[4706]: I1206 05:43:45.312686 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Dec 06 05:43:45 crc kubenswrapper[4706]: I1206 05:43:45.314303 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 06 05:43:45 crc kubenswrapper[4706]: I1206 05:43:45.325787 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 06 05:43:45 crc kubenswrapper[4706]: I1206 05:43:45.326569 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-sstst" Dec 06 05:43:45 crc kubenswrapper[4706]: I1206 05:43:45.333581 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkm9n\" (UniqueName: \"kubernetes.io/projected/32917516-145f-4318-a824-43d2fd3b5d85-kube-api-access-zkm9n\") pod \"kube-state-metrics-0\" (UID: \"32917516-145f-4318-a824-43d2fd3b5d85\") " pod="openstack/kube-state-metrics-0" Dec 06 05:43:45 crc kubenswrapper[4706]: I1206 05:43:45.437959 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkm9n\" (UniqueName: \"kubernetes.io/projected/32917516-145f-4318-a824-43d2fd3b5d85-kube-api-access-zkm9n\") pod \"kube-state-metrics-0\" (UID: \"32917516-145f-4318-a824-43d2fd3b5d85\") " pod="openstack/kube-state-metrics-0" Dec 06 05:43:45 crc kubenswrapper[4706]: I1206 05:43:45.465132 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkm9n\" (UniqueName: \"kubernetes.io/projected/32917516-145f-4318-a824-43d2fd3b5d85-kube-api-access-zkm9n\") pod \"kube-state-metrics-0\" (UID: \"32917516-145f-4318-a824-43d2fd3b5d85\") " pod="openstack/kube-state-metrics-0" Dec 06 05:43:45 crc kubenswrapper[4706]: I1206 05:43:45.633320 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 06 05:43:48 crc kubenswrapper[4706]: I1206 05:43:48.887532 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-cj4kx"] Dec 06 05:43:48 crc kubenswrapper[4706]: I1206 05:43:48.889431 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:48 crc kubenswrapper[4706]: I1206 05:43:48.893968 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-k9ncm" Dec 06 05:43:48 crc kubenswrapper[4706]: I1206 05:43:48.894401 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Dec 06 05:43:48 crc kubenswrapper[4706]: I1206 05:43:48.896894 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-cbrg2"] Dec 06 05:43:48 crc kubenswrapper[4706]: I1206 05:43:48.897869 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:48 crc kubenswrapper[4706]: I1206 05:43:48.899806 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Dec 06 05:43:48 crc kubenswrapper[4706]: I1206 05:43:48.921333 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-cj4kx"] Dec 06 05:43:48 crc kubenswrapper[4706]: I1206 05:43:48.934335 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-cbrg2"] Dec 06 05:43:48 crc kubenswrapper[4706]: I1206 05:43:48.993594 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/cbdbd121-5030-4488-9425-7548fb291906-var-lib\") pod \"ovn-controller-ovs-cj4kx\" (UID: \"cbdbd121-5030-4488-9425-7548fb291906\") " pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:48 crc kubenswrapper[4706]: I1206 05:43:48.993721 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cbdbd121-5030-4488-9425-7548fb291906-scripts\") pod \"ovn-controller-ovs-cj4kx\" (UID: \"cbdbd121-5030-4488-9425-7548fb291906\") " pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:48 crc kubenswrapper[4706]: I1206 05:43:48.993952 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/cbdbd121-5030-4488-9425-7548fb291906-etc-ovs\") pod \"ovn-controller-ovs-cj4kx\" (UID: \"cbdbd121-5030-4488-9425-7548fb291906\") " pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:48 crc kubenswrapper[4706]: I1206 05:43:48.993998 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9rsb\" (UniqueName: \"kubernetes.io/projected/cbdbd121-5030-4488-9425-7548fb291906-kube-api-access-b9rsb\") pod \"ovn-controller-ovs-cj4kx\" (UID: \"cbdbd121-5030-4488-9425-7548fb291906\") " pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:48 crc kubenswrapper[4706]: I1206 05:43:48.994075 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cbdbd121-5030-4488-9425-7548fb291906-var-run\") pod \"ovn-controller-ovs-cj4kx\" (UID: \"cbdbd121-5030-4488-9425-7548fb291906\") " pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:48 crc kubenswrapper[4706]: I1206 05:43:48.994108 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/cbdbd121-5030-4488-9425-7548fb291906-var-log\") pod \"ovn-controller-ovs-cj4kx\" (UID: \"cbdbd121-5030-4488-9425-7548fb291906\") " pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.095812 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvmzd\" (UniqueName: \"kubernetes.io/projected/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-kube-api-access-wvmzd\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.095895 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cbdbd121-5030-4488-9425-7548fb291906-scripts\") pod 
\"ovn-controller-ovs-cj4kx\" (UID: \"cbdbd121-5030-4488-9425-7548fb291906\") " pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.095918 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-scripts\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.095945 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/cbdbd121-5030-4488-9425-7548fb291906-etc-ovs\") pod \"ovn-controller-ovs-cj4kx\" (UID: \"cbdbd121-5030-4488-9425-7548fb291906\") " pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.095981 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9rsb\" (UniqueName: \"kubernetes.io/projected/cbdbd121-5030-4488-9425-7548fb291906-kube-api-access-b9rsb\") pod \"ovn-controller-ovs-cj4kx\" (UID: \"cbdbd121-5030-4488-9425-7548fb291906\") " pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.096005 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-var-run-ovn\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.096059 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-var-run\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.096087 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cbdbd121-5030-4488-9425-7548fb291906-var-run\") pod \"ovn-controller-ovs-cj4kx\" (UID: \"cbdbd121-5030-4488-9425-7548fb291906\") " pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.096109 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-ovn-controller-tls-certs\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.096131 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-combined-ca-bundle\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.096159 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/cbdbd121-5030-4488-9425-7548fb291906-var-log\") pod \"ovn-controller-ovs-cj4kx\" (UID: \"cbdbd121-5030-4488-9425-7548fb291906\") " 
pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.096205 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/cbdbd121-5030-4488-9425-7548fb291906-var-lib\") pod \"ovn-controller-ovs-cj4kx\" (UID: \"cbdbd121-5030-4488-9425-7548fb291906\") " pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.096228 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-var-log-ovn\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.099709 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/cbdbd121-5030-4488-9425-7548fb291906-etc-ovs\") pod \"ovn-controller-ovs-cj4kx\" (UID: \"cbdbd121-5030-4488-9425-7548fb291906\") " pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.099907 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/cbdbd121-5030-4488-9425-7548fb291906-var-log\") pod \"ovn-controller-ovs-cj4kx\" (UID: \"cbdbd121-5030-4488-9425-7548fb291906\") " pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.100014 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cbdbd121-5030-4488-9425-7548fb291906-var-run\") pod \"ovn-controller-ovs-cj4kx\" (UID: \"cbdbd121-5030-4488-9425-7548fb291906\") " pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.100131 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/cbdbd121-5030-4488-9425-7548fb291906-var-lib\") pod \"ovn-controller-ovs-cj4kx\" (UID: \"cbdbd121-5030-4488-9425-7548fb291906\") " pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.101328 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cbdbd121-5030-4488-9425-7548fb291906-scripts\") pod \"ovn-controller-ovs-cj4kx\" (UID: \"cbdbd121-5030-4488-9425-7548fb291906\") " pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.139756 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9rsb\" (UniqueName: \"kubernetes.io/projected/cbdbd121-5030-4488-9425-7548fb291906-kube-api-access-b9rsb\") pod \"ovn-controller-ovs-cj4kx\" (UID: \"cbdbd121-5030-4488-9425-7548fb291906\") " pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.233483 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-scripts\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.233498 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.233704 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-var-run-ovn\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.233546 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-var-run-ovn\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.234586 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-var-run\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.234629 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-ovn-controller-tls-certs\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.234660 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-combined-ca-bundle\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.234764 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-var-log-ovn\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.234833 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvmzd\" (UniqueName: \"kubernetes.io/projected/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-kube-api-access-wvmzd\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.235289 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-var-run\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.235438 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-scripts\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.235651 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: 
\"kubernetes.io/host-path/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-var-log-ovn\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.238677 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-ovn-controller-tls-certs\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.238853 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-combined-ca-bundle\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.267473 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvmzd\" (UniqueName: \"kubernetes.io/projected/cde7e1a3-dd72-47aa-a0b5-117bc2c53885-kube-api-access-wvmzd\") pod \"ovn-controller-cbrg2\" (UID: \"cde7e1a3-dd72-47aa-a0b5-117bc2c53885\") " pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:49 crc kubenswrapper[4706]: I1206 05:43:49.528296 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-cbrg2" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.330639 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.332063 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.336289 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.336708 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-2p6j6" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.336713 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.337336 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.339212 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.349043 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.454994 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c330d787-77c8-4014-85a5-7d1bcf73836b-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.455071 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c330d787-77c8-4014-85a5-7d1bcf73836b-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" 
(UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.455093 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c330d787-77c8-4014-85a5-7d1bcf73836b-config\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.455131 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c330d787-77c8-4014-85a5-7d1bcf73836b-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.455154 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.455173 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c330d787-77c8-4014-85a5-7d1bcf73836b-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.455253 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c330d787-77c8-4014-85a5-7d1bcf73836b-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.455282 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jg9b\" (UniqueName: \"kubernetes.io/projected/c330d787-77c8-4014-85a5-7d1bcf73836b-kube-api-access-4jg9b\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.556908 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c330d787-77c8-4014-85a5-7d1bcf73836b-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.556965 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c330d787-77c8-4014-85a5-7d1bcf73836b-config\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.557014 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c330d787-77c8-4014-85a5-7d1bcf73836b-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.557066 4706 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.557094 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c330d787-77c8-4014-85a5-7d1bcf73836b-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.557123 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c330d787-77c8-4014-85a5-7d1bcf73836b-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.557162 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jg9b\" (UniqueName: \"kubernetes.io/projected/c330d787-77c8-4014-85a5-7d1bcf73836b-kube-api-access-4jg9b\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.557213 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c330d787-77c8-4014-85a5-7d1bcf73836b-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.558111 4706 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.558610 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/c330d787-77c8-4014-85a5-7d1bcf73836b-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.558847 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c330d787-77c8-4014-85a5-7d1bcf73836b-config\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.558867 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c330d787-77c8-4014-85a5-7d1bcf73836b-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.562573 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/c330d787-77c8-4014-85a5-7d1bcf73836b-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: 
I1206 05:43:50.563857 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c330d787-77c8-4014-85a5-7d1bcf73836b-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.564589 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/c330d787-77c8-4014-85a5-7d1bcf73836b-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.577495 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jg9b\" (UniqueName: \"kubernetes.io/projected/c330d787-77c8-4014-85a5-7d1bcf73836b-kube-api-access-4jg9b\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.582161 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"c330d787-77c8-4014-85a5-7d1bcf73836b\") " pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:50 crc kubenswrapper[4706]: I1206 05:43:50.661692 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Dec 06 05:43:52 crc kubenswrapper[4706]: I1206 05:43:52.999596 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.000868 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.002919 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.005701 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-q88z5" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.005698 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.006351 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.011233 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.112015 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cpbb\" (UniqueName: \"kubernetes.io/projected/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-kube-api-access-9cpbb\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.112107 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-config\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.112218 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.112326 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.112350 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.112494 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.112523 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-scripts\") pod \"ovsdbserver-sb-0\" (UID: 
\"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.112544 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.240709 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.240757 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.240778 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.240817 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cpbb\" (UniqueName: \"kubernetes.io/projected/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-kube-api-access-9cpbb\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.240865 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-config\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.240903 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.240949 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.240969 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.241065 4706 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.241916 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-config\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.242165 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.245673 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.248439 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.251477 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.254259 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.270484 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.274428 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cpbb\" (UniqueName: \"kubernetes.io/projected/51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad-kube-api-access-9cpbb\") pod \"ovsdbserver-sb-0\" (UID: \"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad\") " pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:53 crc kubenswrapper[4706]: I1206 05:43:53.323446 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Dec 06 05:43:54 crc kubenswrapper[4706]: E1206 05:43:54.816257 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 06 05:43:54 crc kubenswrapper[4706]: E1206 05:43:54.816412 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-d5dmg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-v4r7w_openstack(a845f2bd-03a5-491b-bb1f-59e6fa2a0136): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:43:54 crc kubenswrapper[4706]: E1206 05:43:54.817768 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-v4r7w" podUID="a845f2bd-03a5-491b-bb1f-59e6fa2a0136" Dec 06 05:43:54 crc kubenswrapper[4706]: E1206 05:43:54.889554 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Dec 06 05:43:54 crc kubenswrapper[4706]: E1206 05:43:54.889705 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d 
--hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-knx7k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-d7qpg_openstack(bfcf47c4-2af2-4393-9b3f-ed4f82f62f18): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:43:54 crc kubenswrapper[4706]: E1206 05:43:54.890897 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-d7qpg" podUID="bfcf47c4-2af2-4393-9b3f-ed4f82f62f18" Dec 06 05:43:55 crc kubenswrapper[4706]: I1206 05:43:55.563748 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-v4r7w" Dec 06 05:43:55 crc kubenswrapper[4706]: I1206 05:43:55.631397 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 06 05:43:55 crc kubenswrapper[4706]: I1206 05:43:55.699597 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a845f2bd-03a5-491b-bb1f-59e6fa2a0136-config\") pod \"a845f2bd-03a5-491b-bb1f-59e6fa2a0136\" (UID: \"a845f2bd-03a5-491b-bb1f-59e6fa2a0136\") " Dec 06 05:43:55 crc kubenswrapper[4706]: I1206 05:43:55.699680 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d5dmg\" (UniqueName: \"kubernetes.io/projected/a845f2bd-03a5-491b-bb1f-59e6fa2a0136-kube-api-access-d5dmg\") pod \"a845f2bd-03a5-491b-bb1f-59e6fa2a0136\" (UID: \"a845f2bd-03a5-491b-bb1f-59e6fa2a0136\") " Dec 06 05:43:55 crc kubenswrapper[4706]: I1206 05:43:55.701516 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a845f2bd-03a5-491b-bb1f-59e6fa2a0136-config" (OuterVolumeSpecName: "config") pod "a845f2bd-03a5-491b-bb1f-59e6fa2a0136" (UID: "a845f2bd-03a5-491b-bb1f-59e6fa2a0136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:43:55 crc kubenswrapper[4706]: I1206 05:43:55.706155 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a845f2bd-03a5-491b-bb1f-59e6fa2a0136-kube-api-access-d5dmg" (OuterVolumeSpecName: "kube-api-access-d5dmg") pod "a845f2bd-03a5-491b-bb1f-59e6fa2a0136" (UID: "a845f2bd-03a5-491b-bb1f-59e6fa2a0136"). InnerVolumeSpecName "kube-api-access-d5dmg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:43:55 crc kubenswrapper[4706]: I1206 05:43:55.733241 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Dec 06 05:43:55 crc kubenswrapper[4706]: I1206 05:43:55.739409 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Dec 06 05:43:55 crc kubenswrapper[4706]: I1206 05:43:55.745966 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 06 05:43:55 crc kubenswrapper[4706]: I1206 05:43:55.751979 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Dec 06 05:43:55 crc kubenswrapper[4706]: W1206 05:43:55.754099 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod74e1bb57_a746_472b_a3b1_ffb875c658e4.slice/crio-1f161471dde93a5d18ede45d721a9fa7c1ef121c9b2817b41cf228892473b29a WatchSource:0}: Error finding container 1f161471dde93a5d18ede45d721a9fa7c1ef121c9b2817b41cf228892473b29a: Status 404 returned error can't find the container with id 1f161471dde93a5d18ede45d721a9fa7c1ef121c9b2817b41cf228892473b29a Dec 06 05:43:55 crc kubenswrapper[4706]: W1206 05:43:55.758668 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod08955916_6689_445e_830d_6fbfe9a2f460.slice/crio-fc001d9cb8cd16628b0b16b8d70ada626862572f50d7e9ab8daa520944a285cc WatchSource:0}: Error finding container fc001d9cb8cd16628b0b16b8d70ada626862572f50d7e9ab8daa520944a285cc: Status 404 returned error can't find the container with id fc001d9cb8cd16628b0b16b8d70ada626862572f50d7e9ab8daa520944a285cc Dec 06 05:43:55 crc kubenswrapper[4706]: 
I1206 05:43:55.767163 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-d7qpg" Dec 06 05:43:55 crc kubenswrapper[4706]: I1206 05:43:55.802092 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d5dmg\" (UniqueName: \"kubernetes.io/projected/a845f2bd-03a5-491b-bb1f-59e6fa2a0136-kube-api-access-d5dmg\") on node \"crc\" DevicePath \"\"" Dec 06 05:43:55 crc kubenswrapper[4706]: I1206 05:43:55.802122 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a845f2bd-03a5-491b-bb1f-59e6fa2a0136-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:43:55 crc kubenswrapper[4706]: I1206 05:43:55.902883 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-knx7k\" (UniqueName: \"kubernetes.io/projected/bfcf47c4-2af2-4393-9b3f-ed4f82f62f18-kube-api-access-knx7k\") pod \"bfcf47c4-2af2-4393-9b3f-ed4f82f62f18\" (UID: \"bfcf47c4-2af2-4393-9b3f-ed4f82f62f18\") " Dec 06 05:43:55 crc kubenswrapper[4706]: I1206 05:43:55.902984 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bfcf47c4-2af2-4393-9b3f-ed4f82f62f18-dns-svc\") pod \"bfcf47c4-2af2-4393-9b3f-ed4f82f62f18\" (UID: \"bfcf47c4-2af2-4393-9b3f-ed4f82f62f18\") " Dec 06 05:43:55 crc kubenswrapper[4706]: I1206 05:43:55.903155 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfcf47c4-2af2-4393-9b3f-ed4f82f62f18-config\") pod \"bfcf47c4-2af2-4393-9b3f-ed4f82f62f18\" (UID: \"bfcf47c4-2af2-4393-9b3f-ed4f82f62f18\") " Dec 06 05:43:55 crc kubenswrapper[4706]: I1206 05:43:55.903597 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfcf47c4-2af2-4393-9b3f-ed4f82f62f18-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bfcf47c4-2af2-4393-9b3f-ed4f82f62f18" (UID: "bfcf47c4-2af2-4393-9b3f-ed4f82f62f18"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:43:55 crc kubenswrapper[4706]: I1206 05:43:55.903801 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfcf47c4-2af2-4393-9b3f-ed4f82f62f18-config" (OuterVolumeSpecName: "config") pod "bfcf47c4-2af2-4393-9b3f-ed4f82f62f18" (UID: "bfcf47c4-2af2-4393-9b3f-ed4f82f62f18"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:43:55 crc kubenswrapper[4706]: I1206 05:43:55.905649 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfcf47c4-2af2-4393-9b3f-ed4f82f62f18-kube-api-access-knx7k" (OuterVolumeSpecName: "kube-api-access-knx7k") pod "bfcf47c4-2af2-4393-9b3f-ed4f82f62f18" (UID: "bfcf47c4-2af2-4393-9b3f-ed4f82f62f18"). InnerVolumeSpecName "kube-api-access-knx7k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.005181 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bfcf47c4-2af2-4393-9b3f-ed4f82f62f18-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.005220 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-knx7k\" (UniqueName: \"kubernetes.io/projected/bfcf47c4-2af2-4393-9b3f-ed4f82f62f18-kube-api-access-knx7k\") on node \"crc\" DevicePath \"\"" Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.005238 4706 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bfcf47c4-2af2-4393-9b3f-ed4f82f62f18-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.138897 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-cbrg2"] Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.152368 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-d7qpg" Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.152361 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-d7qpg" event={"ID":"bfcf47c4-2af2-4393-9b3f-ed4f82f62f18","Type":"ContainerDied","Data":"db6e5261807e7881d3cbce118125c9bba5e9ace7d40bb23f930cbea1418ca024"} Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.162101 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.163089 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e","Type":"ContainerStarted","Data":"46d3335f068a1373fca9307feb78c1dbcdf94557955afa5e22413ab46f343bc9"} Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.165234 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-v4r7w" event={"ID":"a845f2bd-03a5-491b-bb1f-59e6fa2a0136","Type":"ContainerDied","Data":"192c4a7a658dc5cc733e27de110904060ed9c265cbd989a9e0fb41aaef60664b"} Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.165305 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-v4r7w" Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.167010 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"08955916-6689-445e-830d-6fbfe9a2f460","Type":"ContainerStarted","Data":"fc001d9cb8cd16628b0b16b8d70ada626862572f50d7e9ab8daa520944a285cc"} Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.168010 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"74e1bb57-a746-472b-a3b1-ffb875c658e4","Type":"ContainerStarted","Data":"1f161471dde93a5d18ede45d721a9fa7c1ef121c9b2817b41cf228892473b29a"} Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.169541 4706 generic.go:334] "Generic (PLEG): container finished" podID="8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e" containerID="b86756d03a7facdb7ceb50929af31a573ddefbde5c97e3706c828287ad4a4878" exitCode=0 Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.169586 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" event={"ID":"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e","Type":"ContainerDied","Data":"b86756d03a7facdb7ceb50929af31a573ddefbde5c97e3706c828287ad4a4878"} Dec 06 05:43:56 crc kubenswrapper[4706]: W1206 05:43:56.170291 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcde7e1a3_dd72_47aa_a0b5_117bc2c53885.slice/crio-0a4bb83429b6b603c1bd46506095dc6f3eeaa557cd4db839c0c01e818671d0f0 WatchSource:0}: Error finding container 0a4bb83429b6b603c1bd46506095dc6f3eeaa557cd4db839c0c01e818671d0f0: Status 404 returned error can't find the container with id 0a4bb83429b6b603c1bd46506095dc6f3eeaa557cd4db839c0c01e818671d0f0 Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.175792 4706 generic.go:334] "Generic (PLEG): container finished" podID="e7604eb9-df25-4027-bef0-567366a35e27" containerID="54c74bde25af9a05ebad2b044fbf15a5ebd65898dab6fa06bc4dafe1deb465a1" exitCode=0 Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.175858 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" event={"ID":"e7604eb9-df25-4027-bef0-567366a35e27","Type":"ContainerDied","Data":"54c74bde25af9a05ebad2b044fbf15a5ebd65898dab6fa06bc4dafe1deb465a1"} Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.182852 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"bfd60e65-9bee-4772-bbd5-b6d64a5a225c","Type":"ContainerStarted","Data":"0b15f626a9411f3bced3566da1d506b9c72eb68f413c6ccf5a0cd47309ab5802"} Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.208922 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-d7qpg"] Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.217167 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf","Type":"ContainerStarted","Data":"cee98a48441d5c2d26dca021fb185c4412e263e68f26554d4ac56d61c272e452"} Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.260685 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-d7qpg"] Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.329359 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-v4r7w"] Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.341156 4706 kubelet.go:2431] "SyncLoop 
REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-v4r7w"] Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.349908 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Dec 06 05:43:56 crc kubenswrapper[4706]: I1206 05:43:56.933108 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Dec 06 05:43:56 crc kubenswrapper[4706]: W1206 05:43:56.959125 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51b1b8ca_3f0f_47f0_bfad_860eaa7f19ad.slice/crio-03af45f840cfcc605ee51c8e2528b4fea14343ce6beb0a98c69febad8983b798 WatchSource:0}: Error finding container 03af45f840cfcc605ee51c8e2528b4fea14343ce6beb0a98c69febad8983b798: Status 404 returned error can't find the container with id 03af45f840cfcc605ee51c8e2528b4fea14343ce6beb0a98c69febad8983b798 Dec 06 05:43:57 crc kubenswrapper[4706]: I1206 05:43:57.074176 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-cj4kx"] Dec 06 05:43:57 crc kubenswrapper[4706]: W1206 05:43:57.081829 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcbdbd121_5030_4488_9425_7548fb291906.slice/crio-604260b7f01bfe12baf41b34efc15c660d7ecc7315d7052a479637546dc3a463 WatchSource:0}: Error finding container 604260b7f01bfe12baf41b34efc15c660d7ecc7315d7052a479637546dc3a463: Status 404 returned error can't find the container with id 604260b7f01bfe12baf41b34efc15c660d7ecc7315d7052a479637546dc3a463 Dec 06 05:43:57 crc kubenswrapper[4706]: I1206 05:43:57.225997 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c330d787-77c8-4014-85a5-7d1bcf73836b","Type":"ContainerStarted","Data":"0c3500d551c6a5a92ae664d733cafc5549172fb2371a3dcec6ea9f392ffa632c"} Dec 06 05:43:57 crc kubenswrapper[4706]: I1206 05:43:57.228551 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" event={"ID":"e7604eb9-df25-4027-bef0-567366a35e27","Type":"ContainerStarted","Data":"a103fdd9aa4ee5516d0e9db715571ff65fc6d5ee7e2628e3c7e7930cf6704cea"} Dec 06 05:43:57 crc kubenswrapper[4706]: I1206 05:43:57.229104 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" Dec 06 05:43:57 crc kubenswrapper[4706]: I1206 05:43:57.230623 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"32917516-145f-4318-a824-43d2fd3b5d85","Type":"ContainerStarted","Data":"45bf7a73be46738e918eb53274ac11999f2bbe1783227c8b4a95f9b96fbcee12"} Dec 06 05:43:57 crc kubenswrapper[4706]: I1206 05:43:57.232780 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-cj4kx" event={"ID":"cbdbd121-5030-4488-9425-7548fb291906","Type":"ContainerStarted","Data":"604260b7f01bfe12baf41b34efc15c660d7ecc7315d7052a479637546dc3a463"} Dec 06 05:43:57 crc kubenswrapper[4706]: I1206 05:43:57.234034 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad","Type":"ContainerStarted","Data":"03af45f840cfcc605ee51c8e2528b4fea14343ce6beb0a98c69febad8983b798"} Dec 06 05:43:57 crc kubenswrapper[4706]: I1206 05:43:57.235930 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" 
event={"ID":"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e","Type":"ContainerStarted","Data":"4060c18dc081f2c405b6a535afd8fb1fa3c38f8f9c23ec029cf8d4be88466118"} Dec 06 05:43:57 crc kubenswrapper[4706]: I1206 05:43:57.236444 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" Dec 06 05:43:57 crc kubenswrapper[4706]: I1206 05:43:57.238819 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-cbrg2" event={"ID":"cde7e1a3-dd72-47aa-a0b5-117bc2c53885","Type":"ContainerStarted","Data":"0a4bb83429b6b603c1bd46506095dc6f3eeaa557cd4db839c0c01e818671d0f0"} Dec 06 05:43:57 crc kubenswrapper[4706]: I1206 05:43:57.249694 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" podStartSLOduration=3.357947766 podStartE2EDuration="18.249672476s" podCreationTimestamp="2025-12-06 05:43:39 +0000 UTC" firstStartedPulling="2025-12-06 05:43:40.184033296 +0000 UTC m=+1442.511857230" lastFinishedPulling="2025-12-06 05:43:55.075758006 +0000 UTC m=+1457.403581940" observedRunningTime="2025-12-06 05:43:57.248814713 +0000 UTC m=+1459.576638657" watchObservedRunningTime="2025-12-06 05:43:57.249672476 +0000 UTC m=+1459.577496420" Dec 06 05:43:57 crc kubenswrapper[4706]: I1206 05:43:57.271929 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" podStartSLOduration=3.009349337 podStartE2EDuration="18.271909118s" podCreationTimestamp="2025-12-06 05:43:39 +0000 UTC" firstStartedPulling="2025-12-06 05:43:39.744407443 +0000 UTC m=+1442.072231387" lastFinishedPulling="2025-12-06 05:43:55.006967224 +0000 UTC m=+1457.334791168" observedRunningTime="2025-12-06 05:43:57.269501362 +0000 UTC m=+1459.597325306" watchObservedRunningTime="2025-12-06 05:43:57.271909118 +0000 UTC m=+1459.599733062" Dec 06 05:43:58 crc kubenswrapper[4706]: I1206 05:43:58.056204 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a845f2bd-03a5-491b-bb1f-59e6fa2a0136" path="/var/lib/kubelet/pods/a845f2bd-03a5-491b-bb1f-59e6fa2a0136/volumes" Dec 06 05:43:58 crc kubenswrapper[4706]: I1206 05:43:58.057082 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bfcf47c4-2af2-4393-9b3f-ed4f82f62f18" path="/var/lib/kubelet/pods/bfcf47c4-2af2-4393-9b3f-ed4f82f62f18/volumes" Dec 06 05:44:04 crc kubenswrapper[4706]: I1206 05:44:04.380202 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" Dec 06 05:44:04 crc kubenswrapper[4706]: I1206 05:44:04.665990 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" Dec 06 05:44:04 crc kubenswrapper[4706]: I1206 05:44:04.746928 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-kr6cp"] Dec 06 05:44:05 crc kubenswrapper[4706]: I1206 05:44:05.336322 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" podUID="8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e" containerName="dnsmasq-dns" containerID="cri-o://4060c18dc081f2c405b6a535afd8fb1fa3c38f8f9c23ec029cf8d4be88466118" gracePeriod=10 Dec 06 05:44:05 crc kubenswrapper[4706]: I1206 05:44:05.961357 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:44:05 crc kubenswrapper[4706]: I1206 05:44:05.961705 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:44:06 crc kubenswrapper[4706]: I1206 05:44:06.344142 4706 generic.go:334] "Generic (PLEG): container finished" podID="8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e" containerID="4060c18dc081f2c405b6a535afd8fb1fa3c38f8f9c23ec029cf8d4be88466118" exitCode=0 Dec 06 05:44:06 crc kubenswrapper[4706]: I1206 05:44:06.344203 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" event={"ID":"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e","Type":"ContainerDied","Data":"4060c18dc081f2c405b6a535afd8fb1fa3c38f8f9c23ec029cf8d4be88466118"} Dec 06 05:44:08 crc kubenswrapper[4706]: E1206 05:44:08.337727 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Dec 06 05:44:08 crc kubenswrapper[4706]: E1206 05:44:08.338331 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hpljz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
openstack-galera-0_openstack(74e1bb57-a746-472b-a3b1-ffb875c658e4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:44:08 crc kubenswrapper[4706]: E1206 05:44:08.339514 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="74e1bb57-a746-472b-a3b1-ffb875c658e4" Dec 06 05:44:08 crc kubenswrapper[4706]: E1206 05:44:08.356947 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Dec 06 05:44:08 crc kubenswrapper[4706]: E1206 05:44:08.357111 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-57w24,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
rabbitmq-cell1-server-0_openstack(f16a0463-de95-4c8c-a1b5-d80e8a2ec59e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:44:08 crc kubenswrapper[4706]: E1206 05:44:08.358167 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" Dec 06 05:44:08 crc kubenswrapper[4706]: E1206 05:44:08.362935 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="74e1bb57-a746-472b-a3b1-ffb875c658e4" Dec 06 05:44:09 crc kubenswrapper[4706]: E1206 05:44:09.368664 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" Dec 06 05:44:09 crc kubenswrapper[4706]: I1206 05:44:09.383333 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" podUID="8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.91:5353: connect: connection refused" Dec 06 05:44:10 crc kubenswrapper[4706]: E1206 05:44:10.012011 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified" Dec 06 05:44:10 crc kubenswrapper[4706]: E1206 05:44:10.012438 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- 
/usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n64dh657h576h695h5bh5b4h87h65ch647hddhdfh5c4hdch66bhb8h58ch89h569h579h65h548h75h5f6h5f4h559h55fh5b5hddh64dhf5h65ch5cfq,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ttpp5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:44:10 crc kubenswrapper[4706]: E1206 05:44:10.013626 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf" Dec 06 05:44:10 crc kubenswrapper[4706]: E1206 05:44:10.049570 4706 log.go:32] 
"PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Dec 06 05:44:10 crc kubenswrapper[4706]: E1206 05:44:10.049710 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2m4gp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(08955916-6689-445e-830d-6fbfe9a2f460): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:44:10 crc kubenswrapper[4706]: E1206 05:44:10.050966 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="08955916-6689-445e-830d-6fbfe9a2f460" Dec 06 05:44:10 crc kubenswrapper[4706]: E1206 05:44:10.375364 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="08955916-6689-445e-830d-6fbfe9a2f460" Dec 06 05:44:10 crc kubenswrapper[4706]: E1206 05:44:10.375400 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="openstack/memcached-0" podUID="6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf" Dec 06 05:44:10 crc kubenswrapper[4706]: I1206 05:44:10.661574 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" Dec 06 05:44:10 crc kubenswrapper[4706]: I1206 05:44:10.760998 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p5xp4\" (UniqueName: \"kubernetes.io/projected/8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e-kube-api-access-p5xp4\") pod \"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e\" (UID: \"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e\") " Dec 06 05:44:10 crc kubenswrapper[4706]: I1206 05:44:10.761132 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e-config\") pod \"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e\" (UID: \"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e\") " Dec 06 05:44:10 crc kubenswrapper[4706]: I1206 05:44:10.761221 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e-dns-svc\") pod \"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e\" (UID: \"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e\") " Dec 06 05:44:10 crc kubenswrapper[4706]: I1206 05:44:10.767156 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e-kube-api-access-p5xp4" (OuterVolumeSpecName: "kube-api-access-p5xp4") pod "8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e" (UID: "8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e"). InnerVolumeSpecName "kube-api-access-p5xp4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:44:10 crc kubenswrapper[4706]: I1206 05:44:10.796689 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e-config" (OuterVolumeSpecName: "config") pod "8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e" (UID: "8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:44:10 crc kubenswrapper[4706]: I1206 05:44:10.809287 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e" (UID: "8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:44:10 crc kubenswrapper[4706]: I1206 05:44:10.864678 4706 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:10 crc kubenswrapper[4706]: I1206 05:44:10.864709 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p5xp4\" (UniqueName: \"kubernetes.io/projected/8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e-kube-api-access-p5xp4\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:10 crc kubenswrapper[4706]: I1206 05:44:10.864720 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:11 crc kubenswrapper[4706]: I1206 05:44:11.382825 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" event={"ID":"8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e","Type":"ContainerDied","Data":"3967a05dc7986677ff5d56b7c7e55de30a8a7d6ba99878445188175de1b4df39"} Dec 06 05:44:11 crc kubenswrapper[4706]: I1206 05:44:11.382876 4706 scope.go:117] "RemoveContainer" containerID="4060c18dc081f2c405b6a535afd8fb1fa3c38f8f9c23ec029cf8d4be88466118" Dec 06 05:44:11 crc kubenswrapper[4706]: I1206 05:44:11.383006 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-kr6cp" Dec 06 05:44:11 crc kubenswrapper[4706]: I1206 05:44:11.422586 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-kr6cp"] Dec 06 05:44:11 crc kubenswrapper[4706]: I1206 05:44:11.427852 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-kr6cp"] Dec 06 05:44:11 crc kubenswrapper[4706]: I1206 05:44:11.790083 4706 scope.go:117] "RemoveContainer" containerID="b86756d03a7facdb7ceb50929af31a573ddefbde5c97e3706c828287ad4a4878" Dec 06 05:44:12 crc kubenswrapper[4706]: I1206 05:44:12.048564 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e" path="/var/lib/kubelet/pods/8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e/volumes" Dec 06 05:44:12 crc kubenswrapper[4706]: I1206 05:44:12.391589 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c330d787-77c8-4014-85a5-7d1bcf73836b","Type":"ContainerStarted","Data":"b265f2ba054d4e81b1f475288a049b5ab2c5a5b7287310f7215c188d5fd4657c"} Dec 06 05:44:12 crc kubenswrapper[4706]: I1206 05:44:12.393592 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"32917516-145f-4318-a824-43d2fd3b5d85","Type":"ContainerStarted","Data":"1ae8c36354b8ded090cf54138a5767d9379f8ad8b265af89be3934ed91c072db"} Dec 06 05:44:12 crc kubenswrapper[4706]: I1206 05:44:12.393835 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Dec 06 05:44:12 crc kubenswrapper[4706]: I1206 05:44:12.394874 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-cj4kx" event={"ID":"cbdbd121-5030-4488-9425-7548fb291906","Type":"ContainerStarted","Data":"a6995b851a86fe458c6ffcaa21dda6203c40a9b27f986652c1b7a0cc3eed56c9"} Dec 06 05:44:12 crc kubenswrapper[4706]: I1206 05:44:12.396342 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" 
event={"ID":"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad","Type":"ContainerStarted","Data":"c3ed134c7222df87436519e4c5ab93d0981659ad3c9f8dbbc5bc0cbdb2ae82e9"} Dec 06 05:44:12 crc kubenswrapper[4706]: I1206 05:44:12.398768 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-cbrg2" event={"ID":"cde7e1a3-dd72-47aa-a0b5-117bc2c53885","Type":"ContainerStarted","Data":"acbd30d546b2b242d91058217c991ad4a7ef3433313a6aacd5cf130ed36cef6b"} Dec 06 05:44:12 crc kubenswrapper[4706]: I1206 05:44:12.398996 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-cbrg2" Dec 06 05:44:12 crc kubenswrapper[4706]: I1206 05:44:12.413407 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=11.734103939 podStartE2EDuration="27.413383474s" podCreationTimestamp="2025-12-06 05:43:45 +0000 UTC" firstStartedPulling="2025-12-06 05:43:56.217972296 +0000 UTC m=+1458.545796240" lastFinishedPulling="2025-12-06 05:44:11.897251811 +0000 UTC m=+1474.225075775" observedRunningTime="2025-12-06 05:44:12.408542192 +0000 UTC m=+1474.736366146" watchObservedRunningTime="2025-12-06 05:44:12.413383474 +0000 UTC m=+1474.741207438" Dec 06 05:44:12 crc kubenswrapper[4706]: I1206 05:44:12.428446 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-cbrg2" podStartSLOduration=9.50454386 podStartE2EDuration="24.4284241s" podCreationTimestamp="2025-12-06 05:43:48 +0000 UTC" firstStartedPulling="2025-12-06 05:43:56.187382318 +0000 UTC m=+1458.515206262" lastFinishedPulling="2025-12-06 05:44:11.111262558 +0000 UTC m=+1473.439086502" observedRunningTime="2025-12-06 05:44:12.426771586 +0000 UTC m=+1474.754595540" watchObservedRunningTime="2025-12-06 05:44:12.4284241 +0000 UTC m=+1474.756248044" Dec 06 05:44:13 crc kubenswrapper[4706]: I1206 05:44:13.408644 4706 generic.go:334] "Generic (PLEG): container finished" podID="cbdbd121-5030-4488-9425-7548fb291906" containerID="a6995b851a86fe458c6ffcaa21dda6203c40a9b27f986652c1b7a0cc3eed56c9" exitCode=0 Dec 06 05:44:13 crc kubenswrapper[4706]: I1206 05:44:13.408700 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-cj4kx" event={"ID":"cbdbd121-5030-4488-9425-7548fb291906","Type":"ContainerDied","Data":"a6995b851a86fe458c6ffcaa21dda6203c40a9b27f986652c1b7a0cc3eed56c9"} Dec 06 05:44:13 crc kubenswrapper[4706]: I1206 05:44:13.414546 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"bfd60e65-9bee-4772-bbd5-b6d64a5a225c","Type":"ContainerStarted","Data":"59ea841a87bb87aa6d7b186eaa2155dbd28a5d718db5af4f41b422fa2c8ac0c7"} Dec 06 05:44:14 crc kubenswrapper[4706]: I1206 05:44:14.422444 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-cj4kx" event={"ID":"cbdbd121-5030-4488-9425-7548fb291906","Type":"ContainerStarted","Data":"746a4ede80dac3dcec5b542114e8eee96539e5970de12781f92cacf25d6dd41d"} Dec 06 05:44:16 crc kubenswrapper[4706]: I1206 05:44:16.437003 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"c330d787-77c8-4014-85a5-7d1bcf73836b","Type":"ContainerStarted","Data":"6d3d63994fc6294f1a5f02153fdbfe77114149232531c321ae8c69fd7cc36304"} Dec 06 05:44:16 crc kubenswrapper[4706]: I1206 05:44:16.440075 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-cj4kx" 
event={"ID":"cbdbd121-5030-4488-9425-7548fb291906","Type":"ContainerStarted","Data":"0cb251a49975af48ff2ee7ce0a32eb5c75d8fc889af1571d3901ac99158d23dc"} Dec 06 05:44:16 crc kubenswrapper[4706]: I1206 05:44:16.440131 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:44:16 crc kubenswrapper[4706]: I1206 05:44:16.440165 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:44:16 crc kubenswrapper[4706]: I1206 05:44:16.442164 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad","Type":"ContainerStarted","Data":"6f93b7d9a89c0b4b948ff3e2c9c17c7ed2a0cbc3ebd750b9f8a509dd958ed7e8"} Dec 06 05:44:16 crc kubenswrapper[4706]: I1206 05:44:16.470273 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=8.547483752 podStartE2EDuration="27.470253152s" podCreationTimestamp="2025-12-06 05:43:49 +0000 UTC" firstStartedPulling="2025-12-06 05:43:56.333733857 +0000 UTC m=+1458.661557801" lastFinishedPulling="2025-12-06 05:44:15.256503257 +0000 UTC m=+1477.584327201" observedRunningTime="2025-12-06 05:44:16.453393056 +0000 UTC m=+1478.781217000" watchObservedRunningTime="2025-12-06 05:44:16.470253152 +0000 UTC m=+1478.798077096" Dec 06 05:44:16 crc kubenswrapper[4706]: I1206 05:44:16.491743 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=7.21386981 podStartE2EDuration="25.491723853s" podCreationTimestamp="2025-12-06 05:43:51 +0000 UTC" firstStartedPulling="2025-12-06 05:43:56.963670639 +0000 UTC m=+1459.291494583" lastFinishedPulling="2025-12-06 05:44:15.241524692 +0000 UTC m=+1477.569348626" observedRunningTime="2025-12-06 05:44:16.487137229 +0000 UTC m=+1478.814961173" watchObservedRunningTime="2025-12-06 05:44:16.491723853 +0000 UTC m=+1478.819547797" Dec 06 05:44:16 crc kubenswrapper[4706]: I1206 05:44:16.508753 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-cj4kx" podStartSLOduration=14.396630923 podStartE2EDuration="28.508737103s" podCreationTimestamp="2025-12-06 05:43:48 +0000 UTC" firstStartedPulling="2025-12-06 05:43:57.084678912 +0000 UTC m=+1459.412502856" lastFinishedPulling="2025-12-06 05:44:11.196785052 +0000 UTC m=+1473.524609036" observedRunningTime="2025-12-06 05:44:16.507241342 +0000 UTC m=+1478.835065276" watchObservedRunningTime="2025-12-06 05:44:16.508737103 +0000 UTC m=+1478.836561047" Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 05:44:17.324106 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 05:44:17.364698 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 05:44:17.447830 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 05:44:17.481219 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 05:44:17.662672 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 
05:44:17.707409 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 05:44:17.737868 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-nx422"] Dec 06 05:44:17 crc kubenswrapper[4706]: E1206 05:44:17.738190 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e" containerName="dnsmasq-dns" Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 05:44:17.738206 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e" containerName="dnsmasq-dns" Dec 06 05:44:17 crc kubenswrapper[4706]: E1206 05:44:17.738248 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e" containerName="init" Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 05:44:17.738255 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e" containerName="init" Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 05:44:17.738395 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ddf1b75-2cc8-42d5-bcb7-d6c95e6b102e" containerName="dnsmasq-dns" Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 05:44:17.741369 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-nx422" Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 05:44:17.743065 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 05:44:17.750995 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-nx422"] Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 05:44:17.900688 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a118ce93-942a-4501-8004-1302d13660b8-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-nx422\" (UID: \"a118ce93-942a-4501-8004-1302d13660b8\") " pod="openstack/dnsmasq-dns-7f896c8c65-nx422" Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 05:44:17.900763 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a118ce93-942a-4501-8004-1302d13660b8-config\") pod \"dnsmasq-dns-7f896c8c65-nx422\" (UID: \"a118ce93-942a-4501-8004-1302d13660b8\") " pod="openstack/dnsmasq-dns-7f896c8c65-nx422" Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 05:44:17.900879 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a118ce93-942a-4501-8004-1302d13660b8-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-nx422\" (UID: \"a118ce93-942a-4501-8004-1302d13660b8\") " pod="openstack/dnsmasq-dns-7f896c8c65-nx422" Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 05:44:17.900939 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqkln\" (UniqueName: \"kubernetes.io/projected/a118ce93-942a-4501-8004-1302d13660b8-kube-api-access-kqkln\") pod \"dnsmasq-dns-7f896c8c65-nx422\" (UID: \"a118ce93-942a-4501-8004-1302d13660b8\") " pod="openstack/dnsmasq-dns-7f896c8c65-nx422" Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 05:44:17.927435 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-5wn4n"] Dec 06 05:44:17 crc 
kubenswrapper[4706]: I1206 05:44:17.928577 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 05:44:17.930267 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Dec 06 05:44:17 crc kubenswrapper[4706]: I1206 05:44:17.936977 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-5wn4n"] Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.002013 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqkln\" (UniqueName: \"kubernetes.io/projected/a118ce93-942a-4501-8004-1302d13660b8-kube-api-access-kqkln\") pod \"dnsmasq-dns-7f896c8c65-nx422\" (UID: \"a118ce93-942a-4501-8004-1302d13660b8\") " pod="openstack/dnsmasq-dns-7f896c8c65-nx422" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.002122 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a118ce93-942a-4501-8004-1302d13660b8-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-nx422\" (UID: \"a118ce93-942a-4501-8004-1302d13660b8\") " pod="openstack/dnsmasq-dns-7f896c8c65-nx422" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.002153 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a118ce93-942a-4501-8004-1302d13660b8-config\") pod \"dnsmasq-dns-7f896c8c65-nx422\" (UID: \"a118ce93-942a-4501-8004-1302d13660b8\") " pod="openstack/dnsmasq-dns-7f896c8c65-nx422" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.002222 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a118ce93-942a-4501-8004-1302d13660b8-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-nx422\" (UID: \"a118ce93-942a-4501-8004-1302d13660b8\") " pod="openstack/dnsmasq-dns-7f896c8c65-nx422" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.003121 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a118ce93-942a-4501-8004-1302d13660b8-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-nx422\" (UID: \"a118ce93-942a-4501-8004-1302d13660b8\") " pod="openstack/dnsmasq-dns-7f896c8c65-nx422" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.003191 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a118ce93-942a-4501-8004-1302d13660b8-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-nx422\" (UID: \"a118ce93-942a-4501-8004-1302d13660b8\") " pod="openstack/dnsmasq-dns-7f896c8c65-nx422" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.003641 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a118ce93-942a-4501-8004-1302d13660b8-config\") pod \"dnsmasq-dns-7f896c8c65-nx422\" (UID: \"a118ce93-942a-4501-8004-1302d13660b8\") " pod="openstack/dnsmasq-dns-7f896c8c65-nx422" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.028354 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqkln\" (UniqueName: \"kubernetes.io/projected/a118ce93-942a-4501-8004-1302d13660b8-kube-api-access-kqkln\") pod \"dnsmasq-dns-7f896c8c65-nx422\" (UID: \"a118ce93-942a-4501-8004-1302d13660b8\") " pod="openstack/dnsmasq-dns-7f896c8c65-nx422" 
Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.070346 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-nx422" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.103526 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc140eba-adb0-407f-8472-1270d4fc5263-combined-ca-bundle\") pod \"ovn-controller-metrics-5wn4n\" (UID: \"bc140eba-adb0-407f-8472-1270d4fc5263\") " pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.104016 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/bc140eba-adb0-407f-8472-1270d4fc5263-ovn-rundir\") pod \"ovn-controller-metrics-5wn4n\" (UID: \"bc140eba-adb0-407f-8472-1270d4fc5263\") " pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.104069 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc140eba-adb0-407f-8472-1270d4fc5263-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-5wn4n\" (UID: \"bc140eba-adb0-407f-8472-1270d4fc5263\") " pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.104116 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/bc140eba-adb0-407f-8472-1270d4fc5263-ovs-rundir\") pod \"ovn-controller-metrics-5wn4n\" (UID: \"bc140eba-adb0-407f-8472-1270d4fc5263\") " pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.104136 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc140eba-adb0-407f-8472-1270d4fc5263-config\") pod \"ovn-controller-metrics-5wn4n\" (UID: \"bc140eba-adb0-407f-8472-1270d4fc5263\") " pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.104180 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcx8d\" (UniqueName: \"kubernetes.io/projected/bc140eba-adb0-407f-8472-1270d4fc5263-kube-api-access-rcx8d\") pod \"ovn-controller-metrics-5wn4n\" (UID: \"bc140eba-adb0-407f-8472-1270d4fc5263\") " pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.206460 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc140eba-adb0-407f-8472-1270d4fc5263-combined-ca-bundle\") pod \"ovn-controller-metrics-5wn4n\" (UID: \"bc140eba-adb0-407f-8472-1270d4fc5263\") " pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.206513 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/bc140eba-adb0-407f-8472-1270d4fc5263-ovn-rundir\") pod \"ovn-controller-metrics-5wn4n\" (UID: \"bc140eba-adb0-407f-8472-1270d4fc5263\") " pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.206553 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/bc140eba-adb0-407f-8472-1270d4fc5263-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-5wn4n\" (UID: \"bc140eba-adb0-407f-8472-1270d4fc5263\") " pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.206580 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/bc140eba-adb0-407f-8472-1270d4fc5263-ovs-rundir\") pod \"ovn-controller-metrics-5wn4n\" (UID: \"bc140eba-adb0-407f-8472-1270d4fc5263\") " pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.206605 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc140eba-adb0-407f-8472-1270d4fc5263-config\") pod \"ovn-controller-metrics-5wn4n\" (UID: \"bc140eba-adb0-407f-8472-1270d4fc5263\") " pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.206653 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcx8d\" (UniqueName: \"kubernetes.io/projected/bc140eba-adb0-407f-8472-1270d4fc5263-kube-api-access-rcx8d\") pod \"ovn-controller-metrics-5wn4n\" (UID: \"bc140eba-adb0-407f-8472-1270d4fc5263\") " pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.208242 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/bc140eba-adb0-407f-8472-1270d4fc5263-ovs-rundir\") pod \"ovn-controller-metrics-5wn4n\" (UID: \"bc140eba-adb0-407f-8472-1270d4fc5263\") " pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.213293 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/bc140eba-adb0-407f-8472-1270d4fc5263-ovn-rundir\") pod \"ovn-controller-metrics-5wn4n\" (UID: \"bc140eba-adb0-407f-8472-1270d4fc5263\") " pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.214137 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc140eba-adb0-407f-8472-1270d4fc5263-config\") pod \"ovn-controller-metrics-5wn4n\" (UID: \"bc140eba-adb0-407f-8472-1270d4fc5263\") " pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.236263 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc140eba-adb0-407f-8472-1270d4fc5263-combined-ca-bundle\") pod \"ovn-controller-metrics-5wn4n\" (UID: \"bc140eba-adb0-407f-8472-1270d4fc5263\") " pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.249946 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcx8d\" (UniqueName: \"kubernetes.io/projected/bc140eba-adb0-407f-8472-1270d4fc5263-kube-api-access-rcx8d\") pod \"ovn-controller-metrics-5wn4n\" (UID: \"bc140eba-adb0-407f-8472-1270d4fc5263\") " pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.252682 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/bc140eba-adb0-407f-8472-1270d4fc5263-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-5wn4n\" (UID: \"bc140eba-adb0-407f-8472-1270d4fc5263\") " pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.265875 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-nx422"] Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.281228 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-hq96d"] Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.284165 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.294566 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.298595 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-hq96d"] Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.409696 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-hq96d\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.409956 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-hq96d\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.410099 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6f8nm\" (UniqueName: \"kubernetes.io/projected/d16dce76-ff24-4e18-86ee-872fcf90ee0c-kube-api-access-6f8nm\") pod \"dnsmasq-dns-86db49b7ff-hq96d\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.410130 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-config\") pod \"dnsmasq-dns-86db49b7ff-hq96d\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.410154 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-hq96d\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.453273 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.493363 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.511739 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-hq96d\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.511816 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6f8nm\" (UniqueName: \"kubernetes.io/projected/d16dce76-ff24-4e18-86ee-872fcf90ee0c-kube-api-access-6f8nm\") pod \"dnsmasq-dns-86db49b7ff-hq96d\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.511835 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-config\") pod \"dnsmasq-dns-86db49b7ff-hq96d\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.511873 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-hq96d\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.511938 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-hq96d\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.512813 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-hq96d\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.513203 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-hq96d\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.513266 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-hq96d\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.513627 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-config\") pod \"dnsmasq-dns-86db49b7ff-hq96d\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.543852 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6f8nm\" (UniqueName: \"kubernetes.io/projected/d16dce76-ff24-4e18-86ee-872fcf90ee0c-kube-api-access-6f8nm\") pod 
\"dnsmasq-dns-86db49b7ff-hq96d\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.544626 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-5wn4n" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.571667 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-nx422"] Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.605728 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.840916 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.844380 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.846498 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.846848 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.847008 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-m76sr" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.847651 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.848099 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Dec 06 05:44:18 crc kubenswrapper[4706]: I1206 05:44:18.970692 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-5wn4n"] Dec 06 05:44:18 crc kubenswrapper[4706]: W1206 05:44:18.973208 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbc140eba_adb0_407f_8472_1270d4fc5263.slice/crio-933c3212fb0b60022302f7ffb517acdc6f794d7ca548b9715c0c6df8346a6152 WatchSource:0}: Error finding container 933c3212fb0b60022302f7ffb517acdc6f794d7ca548b9715c0c6df8346a6152: Status 404 returned error can't find the container with id 933c3212fb0b60022302f7ffb517acdc6f794d7ca548b9715c0c6df8346a6152 Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.022788 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.022857 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rztqr\" (UniqueName: \"kubernetes.io/projected/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-kube-api-access-rztqr\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.022898 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-ovn-rundir\") pod 
\"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.022914 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.022936 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.022959 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-config\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.023012 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-scripts\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.113190 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-hq96d"] Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.124196 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.124265 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rztqr\" (UniqueName: \"kubernetes.io/projected/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-kube-api-access-rztqr\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.124302 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.124319 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.124338 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " 
pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.124358 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-config\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.124407 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-scripts\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.125360 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-scripts\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.125492 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.125647 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-config\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.128400 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.128549 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.130759 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.147145 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rztqr\" (UniqueName: \"kubernetes.io/projected/0d7d6b1e-41f4-4140-a752-bcf110cf3bd5-kube-api-access-rztqr\") pod \"ovn-northd-0\" (UID: \"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5\") " pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.176721 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.462121 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-5wn4n" event={"ID":"bc140eba-adb0-407f-8472-1270d4fc5263","Type":"ContainerStarted","Data":"ba70e1bba212619f68c219ddf18d44cb6c9c12daf780ca3ba21d6d30b724d9b0"} Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.462485 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-5wn4n" event={"ID":"bc140eba-adb0-407f-8472-1270d4fc5263","Type":"ContainerStarted","Data":"933c3212fb0b60022302f7ffb517acdc6f794d7ca548b9715c0c6df8346a6152"} Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.464316 4706 generic.go:334] "Generic (PLEG): container finished" podID="a118ce93-942a-4501-8004-1302d13660b8" containerID="01e4d67543629b537a4ce3a8a0de56fb195c3ad463188dd1eee0faf5e9402bc1" exitCode=0 Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.464375 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-nx422" event={"ID":"a118ce93-942a-4501-8004-1302d13660b8","Type":"ContainerDied","Data":"01e4d67543629b537a4ce3a8a0de56fb195c3ad463188dd1eee0faf5e9402bc1"} Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.464400 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-nx422" event={"ID":"a118ce93-942a-4501-8004-1302d13660b8","Type":"ContainerStarted","Data":"c41cc8570a8580c4fb495b26c2873c2221f7cc311703a0d18d146727c35c9bd8"} Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.501658 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" event={"ID":"d16dce76-ff24-4e18-86ee-872fcf90ee0c","Type":"ContainerStarted","Data":"18ef681464b65e863fb37b8c18c8e2e58765e7e865b3f0db33f3283e3b5ffc27"} Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.501708 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" event={"ID":"d16dce76-ff24-4e18-86ee-872fcf90ee0c","Type":"ContainerStarted","Data":"efac256db2e845286e058f14b46ecdc2ca9ac6a7047318658dbebe9135d021cf"} Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.630811 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Dec 06 05:44:19 crc kubenswrapper[4706]: W1206 05:44:19.637005 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0d7d6b1e_41f4_4140_a752_bcf110cf3bd5.slice/crio-be68a0ce7ea6e5227105ddadadb10008b376b061edd802d8ee57308b1337b3b5 WatchSource:0}: Error finding container be68a0ce7ea6e5227105ddadadb10008b376b061edd802d8ee57308b1337b3b5: Status 404 returned error can't find the container with id be68a0ce7ea6e5227105ddadadb10008b376b061edd802d8ee57308b1337b3b5 Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.639791 4706 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.812897 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-nx422" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.937168 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a118ce93-942a-4501-8004-1302d13660b8-dns-svc\") pod \"a118ce93-942a-4501-8004-1302d13660b8\" (UID: \"a118ce93-942a-4501-8004-1302d13660b8\") " Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.937225 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a118ce93-942a-4501-8004-1302d13660b8-config\") pod \"a118ce93-942a-4501-8004-1302d13660b8\" (UID: \"a118ce93-942a-4501-8004-1302d13660b8\") " Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.937319 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqkln\" (UniqueName: \"kubernetes.io/projected/a118ce93-942a-4501-8004-1302d13660b8-kube-api-access-kqkln\") pod \"a118ce93-942a-4501-8004-1302d13660b8\" (UID: \"a118ce93-942a-4501-8004-1302d13660b8\") " Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.937355 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a118ce93-942a-4501-8004-1302d13660b8-ovsdbserver-sb\") pod \"a118ce93-942a-4501-8004-1302d13660b8\" (UID: \"a118ce93-942a-4501-8004-1302d13660b8\") " Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.942633 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a118ce93-942a-4501-8004-1302d13660b8-kube-api-access-kqkln" (OuterVolumeSpecName: "kube-api-access-kqkln") pod "a118ce93-942a-4501-8004-1302d13660b8" (UID: "a118ce93-942a-4501-8004-1302d13660b8"). InnerVolumeSpecName "kube-api-access-kqkln". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.955575 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a118ce93-942a-4501-8004-1302d13660b8-config" (OuterVolumeSpecName: "config") pod "a118ce93-942a-4501-8004-1302d13660b8" (UID: "a118ce93-942a-4501-8004-1302d13660b8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.956033 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a118ce93-942a-4501-8004-1302d13660b8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "a118ce93-942a-4501-8004-1302d13660b8" (UID: "a118ce93-942a-4501-8004-1302d13660b8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:44:19 crc kubenswrapper[4706]: I1206 05:44:19.958243 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a118ce93-942a-4501-8004-1302d13660b8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "a118ce93-942a-4501-8004-1302d13660b8" (UID: "a118ce93-942a-4501-8004-1302d13660b8"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:44:20 crc kubenswrapper[4706]: I1206 05:44:20.040200 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqkln\" (UniqueName: \"kubernetes.io/projected/a118ce93-942a-4501-8004-1302d13660b8-kube-api-access-kqkln\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:20 crc kubenswrapper[4706]: I1206 05:44:20.040303 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/a118ce93-942a-4501-8004-1302d13660b8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:20 crc kubenswrapper[4706]: I1206 05:44:20.040327 4706 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/a118ce93-942a-4501-8004-1302d13660b8-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:20 crc kubenswrapper[4706]: I1206 05:44:20.040338 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a118ce93-942a-4501-8004-1302d13660b8-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:20 crc kubenswrapper[4706]: I1206 05:44:20.508819 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-nx422" Dec 06 05:44:20 crc kubenswrapper[4706]: I1206 05:44:20.508833 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-nx422" event={"ID":"a118ce93-942a-4501-8004-1302d13660b8","Type":"ContainerDied","Data":"c41cc8570a8580c4fb495b26c2873c2221f7cc311703a0d18d146727c35c9bd8"} Dec 06 05:44:20 crc kubenswrapper[4706]: I1206 05:44:20.509289 4706 scope.go:117] "RemoveContainer" containerID="01e4d67543629b537a4ce3a8a0de56fb195c3ad463188dd1eee0faf5e9402bc1" Dec 06 05:44:20 crc kubenswrapper[4706]: I1206 05:44:20.510295 4706 generic.go:334] "Generic (PLEG): container finished" podID="d16dce76-ff24-4e18-86ee-872fcf90ee0c" containerID="18ef681464b65e863fb37b8c18c8e2e58765e7e865b3f0db33f3283e3b5ffc27" exitCode=0 Dec 06 05:44:20 crc kubenswrapper[4706]: I1206 05:44:20.510366 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" event={"ID":"d16dce76-ff24-4e18-86ee-872fcf90ee0c","Type":"ContainerDied","Data":"18ef681464b65e863fb37b8c18c8e2e58765e7e865b3f0db33f3283e3b5ffc27"} Dec 06 05:44:20 crc kubenswrapper[4706]: I1206 05:44:20.512716 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5","Type":"ContainerStarted","Data":"be68a0ce7ea6e5227105ddadadb10008b376b061edd802d8ee57308b1337b3b5"} Dec 06 05:44:20 crc kubenswrapper[4706]: I1206 05:44:20.576506 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-nx422"] Dec 06 05:44:20 crc kubenswrapper[4706]: I1206 05:44:20.581281 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-nx422"] Dec 06 05:44:20 crc kubenswrapper[4706]: I1206 05:44:20.588407 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-5wn4n" podStartSLOduration=3.588390418 podStartE2EDuration="3.588390418s" podCreationTimestamp="2025-12-06 05:44:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:44:20.584900974 +0000 UTC m=+1482.912724908" watchObservedRunningTime="2025-12-06 05:44:20.588390418 +0000 UTC 
m=+1482.916214362" Dec 06 05:44:21 crc kubenswrapper[4706]: I1206 05:44:21.527115 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" event={"ID":"d16dce76-ff24-4e18-86ee-872fcf90ee0c","Type":"ContainerStarted","Data":"9dbd8668a18d8149257527904fb2c3a30d8d0402319635407a5c117a58888f42"} Dec 06 05:44:21 crc kubenswrapper[4706]: I1206 05:44:21.527935 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:21 crc kubenswrapper[4706]: I1206 05:44:21.547150 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" podStartSLOduration=3.547135885 podStartE2EDuration="3.547135885s" podCreationTimestamp="2025-12-06 05:44:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:44:21.546747304 +0000 UTC m=+1483.874571248" watchObservedRunningTime="2025-12-06 05:44:21.547135885 +0000 UTC m=+1483.874959829" Dec 06 05:44:22 crc kubenswrapper[4706]: I1206 05:44:22.053573 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a118ce93-942a-4501-8004-1302d13660b8" path="/var/lib/kubelet/pods/a118ce93-942a-4501-8004-1302d13660b8/volumes" Dec 06 05:44:22 crc kubenswrapper[4706]: I1206 05:44:22.535423 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5","Type":"ContainerStarted","Data":"2253838d5057b8457b030db84b72882038fb6239739bc578f967477e4d8eda5e"} Dec 06 05:44:22 crc kubenswrapper[4706]: I1206 05:44:22.535496 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"0d7d6b1e-41f4-4140-a752-bcf110cf3bd5","Type":"ContainerStarted","Data":"924c3b32f5ad8c0e98aab66a62523901a0cabfe0f392333c37db0cb5c5b102b8"} Dec 06 05:44:22 crc kubenswrapper[4706]: I1206 05:44:22.554624 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.763385732 podStartE2EDuration="4.55460551s" podCreationTimestamp="2025-12-06 05:44:18 +0000 UTC" firstStartedPulling="2025-12-06 05:44:19.639620161 +0000 UTC m=+1481.967444105" lastFinishedPulling="2025-12-06 05:44:21.430839939 +0000 UTC m=+1483.758663883" observedRunningTime="2025-12-06 05:44:22.552898763 +0000 UTC m=+1484.880722717" watchObservedRunningTime="2025-12-06 05:44:22.55460551 +0000 UTC m=+1484.882429454" Dec 06 05:44:23 crc kubenswrapper[4706]: I1206 05:44:23.543709 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf","Type":"ContainerStarted","Data":"4bd4b871d0770cbfb0577a84ba44cd524ce31642d03e8df4e3541da33db60c0f"} Dec 06 05:44:23 crc kubenswrapper[4706]: I1206 05:44:23.544295 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Dec 06 05:44:23 crc kubenswrapper[4706]: I1206 05:44:23.545529 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"08955916-6689-445e-830d-6fbfe9a2f460","Type":"ContainerStarted","Data":"d79c59ea7e9b3863c5d6b039c9b53aaf868fd1f81312f63d56ef25a4420b4eb5"} Dec 06 05:44:23 crc kubenswrapper[4706]: I1206 05:44:23.548128 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" 
event={"ID":"74e1bb57-a746-472b-a3b1-ffb875c658e4","Type":"ContainerStarted","Data":"ed85421b9374d286813940bfff9a907a2411a7395688f1b9447bd8f7a5134baa"} Dec 06 05:44:23 crc kubenswrapper[4706]: I1206 05:44:23.548160 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Dec 06 05:44:23 crc kubenswrapper[4706]: I1206 05:44:23.567121 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=13.466972362 podStartE2EDuration="40.56710246s" podCreationTimestamp="2025-12-06 05:43:43 +0000 UTC" firstStartedPulling="2025-12-06 05:43:55.750182401 +0000 UTC m=+1458.078006345" lastFinishedPulling="2025-12-06 05:44:22.850312499 +0000 UTC m=+1485.178136443" observedRunningTime="2025-12-06 05:44:23.560000448 +0000 UTC m=+1485.887824392" watchObservedRunningTime="2025-12-06 05:44:23.56710246 +0000 UTC m=+1485.894926394" Dec 06 05:44:25 crc kubenswrapper[4706]: I1206 05:44:25.643057 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Dec 06 05:44:27 crc kubenswrapper[4706]: I1206 05:44:27.577012 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e","Type":"ContainerStarted","Data":"3af9c99284043d95d1d0e700f9d3e8775e1b02554878dda547e21c5836505241"} Dec 06 05:44:28 crc kubenswrapper[4706]: I1206 05:44:28.607205 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:28 crc kubenswrapper[4706]: I1206 05:44:28.655136 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-xfjn8"] Dec 06 05:44:28 crc kubenswrapper[4706]: I1206 05:44:28.655448 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" podUID="e7604eb9-df25-4027-bef0-567366a35e27" containerName="dnsmasq-dns" containerID="cri-o://a103fdd9aa4ee5516d0e9db715571ff65fc6d5ee7e2628e3c7e7930cf6704cea" gracePeriod=10 Dec 06 05:44:28 crc kubenswrapper[4706]: I1206 05:44:28.911387 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.223789 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.395462 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e7604eb9-df25-4027-bef0-567366a35e27-dns-svc\") pod \"e7604eb9-df25-4027-bef0-567366a35e27\" (UID: \"e7604eb9-df25-4027-bef0-567366a35e27\") " Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.395597 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7604eb9-df25-4027-bef0-567366a35e27-config\") pod \"e7604eb9-df25-4027-bef0-567366a35e27\" (UID: \"e7604eb9-df25-4027-bef0-567366a35e27\") " Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.396282 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wq5b9\" (UniqueName: \"kubernetes.io/projected/e7604eb9-df25-4027-bef0-567366a35e27-kube-api-access-wq5b9\") pod \"e7604eb9-df25-4027-bef0-567366a35e27\" (UID: \"e7604eb9-df25-4027-bef0-567366a35e27\") " Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.401085 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7604eb9-df25-4027-bef0-567366a35e27-kube-api-access-wq5b9" (OuterVolumeSpecName: "kube-api-access-wq5b9") pod "e7604eb9-df25-4027-bef0-567366a35e27" (UID: "e7604eb9-df25-4027-bef0-567366a35e27"). InnerVolumeSpecName "kube-api-access-wq5b9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.436861 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7604eb9-df25-4027-bef0-567366a35e27-config" (OuterVolumeSpecName: "config") pod "e7604eb9-df25-4027-bef0-567366a35e27" (UID: "e7604eb9-df25-4027-bef0-567366a35e27"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.437931 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7604eb9-df25-4027-bef0-567366a35e27-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e7604eb9-df25-4027-bef0-567366a35e27" (UID: "e7604eb9-df25-4027-bef0-567366a35e27"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.498380 4706 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e7604eb9-df25-4027-bef0-567366a35e27-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.499218 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7604eb9-df25-4027-bef0-567366a35e27-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.499344 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wq5b9\" (UniqueName: \"kubernetes.io/projected/e7604eb9-df25-4027-bef0-567366a35e27-kube-api-access-wq5b9\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.594320 4706 generic.go:334] "Generic (PLEG): container finished" podID="e7604eb9-df25-4027-bef0-567366a35e27" containerID="a103fdd9aa4ee5516d0e9db715571ff65fc6d5ee7e2628e3c7e7930cf6704cea" exitCode=0 Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.594363 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" event={"ID":"e7604eb9-df25-4027-bef0-567366a35e27","Type":"ContainerDied","Data":"a103fdd9aa4ee5516d0e9db715571ff65fc6d5ee7e2628e3c7e7930cf6704cea"} Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.594414 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" event={"ID":"e7604eb9-df25-4027-bef0-567366a35e27","Type":"ContainerDied","Data":"7b49b7b73d4264c52e71165e44d38a138cb57cd8d91a254fa3cced75852b72a1"} Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.594432 4706 scope.go:117] "RemoveContainer" containerID="a103fdd9aa4ee5516d0e9db715571ff65fc6d5ee7e2628e3c7e7930cf6704cea" Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.594561 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-xfjn8" Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.624751 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-xfjn8"] Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.629918 4706 scope.go:117] "RemoveContainer" containerID="54c74bde25af9a05ebad2b044fbf15a5ebd65898dab6fa06bc4dafe1deb465a1" Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.630120 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-xfjn8"] Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.644866 4706 scope.go:117] "RemoveContainer" containerID="a103fdd9aa4ee5516d0e9db715571ff65fc6d5ee7e2628e3c7e7930cf6704cea" Dec 06 05:44:29 crc kubenswrapper[4706]: E1206 05:44:29.646642 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a103fdd9aa4ee5516d0e9db715571ff65fc6d5ee7e2628e3c7e7930cf6704cea\": container with ID starting with a103fdd9aa4ee5516d0e9db715571ff65fc6d5ee7e2628e3c7e7930cf6704cea not found: ID does not exist" containerID="a103fdd9aa4ee5516d0e9db715571ff65fc6d5ee7e2628e3c7e7930cf6704cea" Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.646947 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a103fdd9aa4ee5516d0e9db715571ff65fc6d5ee7e2628e3c7e7930cf6704cea"} err="failed to get container status \"a103fdd9aa4ee5516d0e9db715571ff65fc6d5ee7e2628e3c7e7930cf6704cea\": rpc error: code = NotFound desc = could not find container \"a103fdd9aa4ee5516d0e9db715571ff65fc6d5ee7e2628e3c7e7930cf6704cea\": container with ID starting with a103fdd9aa4ee5516d0e9db715571ff65fc6d5ee7e2628e3c7e7930cf6704cea not found: ID does not exist" Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.647145 4706 scope.go:117] "RemoveContainer" containerID="54c74bde25af9a05ebad2b044fbf15a5ebd65898dab6fa06bc4dafe1deb465a1" Dec 06 05:44:29 crc kubenswrapper[4706]: E1206 05:44:29.653229 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54c74bde25af9a05ebad2b044fbf15a5ebd65898dab6fa06bc4dafe1deb465a1\": container with ID starting with 54c74bde25af9a05ebad2b044fbf15a5ebd65898dab6fa06bc4dafe1deb465a1 not found: ID does not exist" containerID="54c74bde25af9a05ebad2b044fbf15a5ebd65898dab6fa06bc4dafe1deb465a1" Dec 06 05:44:29 crc kubenswrapper[4706]: I1206 05:44:29.653278 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54c74bde25af9a05ebad2b044fbf15a5ebd65898dab6fa06bc4dafe1deb465a1"} err="failed to get container status \"54c74bde25af9a05ebad2b044fbf15a5ebd65898dab6fa06bc4dafe1deb465a1\": rpc error: code = NotFound desc = could not find container \"54c74bde25af9a05ebad2b044fbf15a5ebd65898dab6fa06bc4dafe1deb465a1\": container with ID starting with 54c74bde25af9a05ebad2b044fbf15a5ebd65898dab6fa06bc4dafe1deb465a1 not found: ID does not exist" Dec 06 05:44:30 crc kubenswrapper[4706]: I1206 05:44:30.049686 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7604eb9-df25-4027-bef0-567366a35e27" path="/var/lib/kubelet/pods/e7604eb9-df25-4027-bef0-567366a35e27/volumes" Dec 06 05:44:34 crc kubenswrapper[4706]: I1206 05:44:34.260958 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.699391 4706 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-8p245"] Dec 06 05:44:35 crc kubenswrapper[4706]: E1206 05:44:35.703438 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7604eb9-df25-4027-bef0-567366a35e27" containerName="init" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.703463 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7604eb9-df25-4027-bef0-567366a35e27" containerName="init" Dec 06 05:44:35 crc kubenswrapper[4706]: E1206 05:44:35.703509 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7604eb9-df25-4027-bef0-567366a35e27" containerName="dnsmasq-dns" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.703515 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7604eb9-df25-4027-bef0-567366a35e27" containerName="dnsmasq-dns" Dec 06 05:44:35 crc kubenswrapper[4706]: E1206 05:44:35.703527 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a118ce93-942a-4501-8004-1302d13660b8" containerName="init" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.703533 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a118ce93-942a-4501-8004-1302d13660b8" containerName="init" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.703798 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7604eb9-df25-4027-bef0-567366a35e27" containerName="dnsmasq-dns" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.703812 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a118ce93-942a-4501-8004-1302d13660b8" containerName="init" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.704701 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-8p245" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.708808 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-dns-svc\") pod \"dnsmasq-dns-698758b865-8p245\" (UID: \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\") " pod="openstack/dnsmasq-dns-698758b865-8p245" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.708877 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-8p245\" (UID: \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\") " pod="openstack/dnsmasq-dns-698758b865-8p245" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.708934 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kbvh\" (UniqueName: \"kubernetes.io/projected/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-kube-api-access-4kbvh\") pod \"dnsmasq-dns-698758b865-8p245\" (UID: \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\") " pod="openstack/dnsmasq-dns-698758b865-8p245" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.708968 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-8p245\" (UID: \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\") " pod="openstack/dnsmasq-dns-698758b865-8p245" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.708994 4706 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-config\") pod \"dnsmasq-dns-698758b865-8p245\" (UID: \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\") " pod="openstack/dnsmasq-dns-698758b865-8p245" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.719171 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-8p245"] Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.810232 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kbvh\" (UniqueName: \"kubernetes.io/projected/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-kube-api-access-4kbvh\") pod \"dnsmasq-dns-698758b865-8p245\" (UID: \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\") " pod="openstack/dnsmasq-dns-698758b865-8p245" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.810305 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-8p245\" (UID: \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\") " pod="openstack/dnsmasq-dns-698758b865-8p245" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.810336 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-config\") pod \"dnsmasq-dns-698758b865-8p245\" (UID: \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\") " pod="openstack/dnsmasq-dns-698758b865-8p245" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.810375 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-dns-svc\") pod \"dnsmasq-dns-698758b865-8p245\" (UID: \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\") " pod="openstack/dnsmasq-dns-698758b865-8p245" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.810412 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-8p245\" (UID: \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\") " pod="openstack/dnsmasq-dns-698758b865-8p245" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.811233 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-8p245\" (UID: \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\") " pod="openstack/dnsmasq-dns-698758b865-8p245" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.811393 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-dns-svc\") pod \"dnsmasq-dns-698758b865-8p245\" (UID: \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\") " pod="openstack/dnsmasq-dns-698758b865-8p245" Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.813468 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-8p245\" (UID: \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\") " pod="openstack/dnsmasq-dns-698758b865-8p245" Dec 06 05:44:35 crc 
Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.834453 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kbvh\" (UniqueName: \"kubernetes.io/projected/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-kube-api-access-4kbvh\") pod \"dnsmasq-dns-698758b865-8p245\" (UID: \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\") " pod="openstack/dnsmasq-dns-698758b865-8p245"
Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.962088 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.962159 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.962210 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z27rn"
Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.963007 4706 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"71fb78259889c3e53f18a29621b104746019c251e6090d6297b3d1c61fdcf223"} pod="openshift-machine-config-operator/machine-config-daemon-z27rn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 06 05:44:35 crc kubenswrapper[4706]: I1206 05:44:35.963101 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" containerID="cri-o://71fb78259889c3e53f18a29621b104746019c251e6090d6297b3d1c61fdcf223" gracePeriod=600
Dec 06 05:44:36 crc kubenswrapper[4706]: I1206 05:44:36.042188 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-8p245"
Dec 06 05:44:36 crc kubenswrapper[4706]: I1206 05:44:36.939525 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"]
Dec 06 05:44:36 crc kubenswrapper[4706]: I1206 05:44:36.953272 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0"
Dec 06 05:44:36 crc kubenswrapper[4706]: I1206 05:44:36.957493 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files"
Dec 06 05:44:36 crc kubenswrapper[4706]: I1206 05:44:36.957803 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data"
Dec 06 05:44:36 crc kubenswrapper[4706]: I1206 05:44:36.958159 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-mm5ld"
Dec 06 05:44:36 crc kubenswrapper[4706]: I1206 05:44:36.958352 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf"
Dec 06 05:44:36 crc kubenswrapper[4706]: I1206 05:44:36.959171 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"]
Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.000446 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-8p245"]
Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.141065 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b2szw\" (UniqueName: \"kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-kube-api-access-b2szw\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0"
Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.141161 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0"
Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.141302 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/91f74906-ec70-4b0c-a657-d075d18f488b-lock\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0"
Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.141628 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0"
Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.141761 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/91f74906-ec70-4b0c-a657-d075d18f488b-cache\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0"
Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.243278 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0"
Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.243344 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/91f74906-ec70-4b0c-a657-d075d18f488b-cache\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0"
pod="openstack/swift-storage-0" Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.243396 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b2szw\" (UniqueName: \"kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-kube-api-access-b2szw\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0" Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.243424 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0" Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.243553 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/91f74906-ec70-4b0c-a657-d075d18f488b-lock\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0" Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.243820 4706 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/swift-storage-0" Dec 06 05:44:37 crc kubenswrapper[4706]: E1206 05:44:37.244357 4706 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 06 05:44:37 crc kubenswrapper[4706]: E1206 05:44:37.244390 4706 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 06 05:44:37 crc kubenswrapper[4706]: E1206 05:44:37.244460 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift podName:91f74906-ec70-4b0c-a657-d075d18f488b nodeName:}" failed. No retries permitted until 2025-12-06 05:44:37.744438158 +0000 UTC m=+1500.072262182 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift") pod "swift-storage-0" (UID: "91f74906-ec70-4b0c-a657-d075d18f488b") : configmap "swift-ring-files" not found Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.253633 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/91f74906-ec70-4b0c-a657-d075d18f488b-cache\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0" Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.253872 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/91f74906-ec70-4b0c-a657-d075d18f488b-lock\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0" Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.280649 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0" Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.282896 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b2szw\" (UniqueName: \"kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-kube-api-access-b2szw\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0" Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.677556 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-8p245" event={"ID":"1a54d701-bfc3-4f6e-acc3-b64b50e91d30","Type":"ContainerStarted","Data":"50528f97c90e230dd4f6c61fd8a8fd896acfab6903d065f086d232de6a3062a9"} Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.680580 4706 generic.go:334] "Generic (PLEG): container finished" podID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerID="71fb78259889c3e53f18a29621b104746019c251e6090d6297b3d1c61fdcf223" exitCode=0 Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.680677 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerDied","Data":"71fb78259889c3e53f18a29621b104746019c251e6090d6297b3d1c61fdcf223"} Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.680735 4706 scope.go:117] "RemoveContainer" containerID="7cb88f72dc580dec882828d525bf28a4003301f3e0567fd190938d53e4a87ab0" Dec 06 05:44:37 crc kubenswrapper[4706]: I1206 05:44:37.750539 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0" Dec 06 05:44:37 crc kubenswrapper[4706]: E1206 05:44:37.750844 4706 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 06 05:44:37 crc kubenswrapper[4706]: E1206 05:44:37.750891 4706 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 06 05:44:37 crc kubenswrapper[4706]: E1206 05:44:37.750984 4706 nestedpendingoperations.go:348] 
Operation for "{volumeName:kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift podName:91f74906-ec70-4b0c-a657-d075d18f488b nodeName:}" failed. No retries permitted until 2025-12-06 05:44:38.75095463 +0000 UTC m=+1501.078778574 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift") pod "swift-storage-0" (UID: "91f74906-ec70-4b0c-a657-d075d18f488b") : configmap "swift-ring-files" not found Dec 06 05:44:38 crc kubenswrapper[4706]: I1206 05:44:38.689863 4706 generic.go:334] "Generic (PLEG): container finished" podID="1a54d701-bfc3-4f6e-acc3-b64b50e91d30" containerID="4d1722cc4d9af4ad56f634aa9cf0e2ff148679f7d4baeea91160b6fb76f8b4d3" exitCode=0 Dec 06 05:44:38 crc kubenswrapper[4706]: I1206 05:44:38.690011 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-8p245" event={"ID":"1a54d701-bfc3-4f6e-acc3-b64b50e91d30","Type":"ContainerDied","Data":"4d1722cc4d9af4ad56f634aa9cf0e2ff148679f7d4baeea91160b6fb76f8b4d3"} Dec 06 05:44:38 crc kubenswrapper[4706]: I1206 05:44:38.698517 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a"} Dec 06 05:44:38 crc kubenswrapper[4706]: I1206 05:44:38.767765 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0" Dec 06 05:44:38 crc kubenswrapper[4706]: E1206 05:44:38.767952 4706 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 06 05:44:38 crc kubenswrapper[4706]: E1206 05:44:38.767984 4706 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 06 05:44:38 crc kubenswrapper[4706]: E1206 05:44:38.768040 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift podName:91f74906-ec70-4b0c-a657-d075d18f488b nodeName:}" failed. No retries permitted until 2025-12-06 05:44:40.768022165 +0000 UTC m=+1503.095846099 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift") pod "swift-storage-0" (UID: "91f74906-ec70-4b0c-a657-d075d18f488b") : configmap "swift-ring-files" not found Dec 06 05:44:39 crc kubenswrapper[4706]: I1206 05:44:39.706913 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-8p245" event={"ID":"1a54d701-bfc3-4f6e-acc3-b64b50e91d30","Type":"ContainerStarted","Data":"4a5d58f6418f0596d2f8097adc7df29b2a513be60819a058e4cd4d12db366016"} Dec 06 05:44:39 crc kubenswrapper[4706]: I1206 05:44:39.727553 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-8p245" podStartSLOduration=4.727536172 podStartE2EDuration="4.727536172s" podCreationTimestamp="2025-12-06 05:44:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:44:39.726781662 +0000 UTC m=+1502.054605626" watchObservedRunningTime="2025-12-06 05:44:39.727536172 +0000 UTC m=+1502.055360116" Dec 06 05:44:40 crc kubenswrapper[4706]: I1206 05:44:40.722213 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-8p245" Dec 06 05:44:40 crc kubenswrapper[4706]: I1206 05:44:40.801995 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0" Dec 06 05:44:40 crc kubenswrapper[4706]: E1206 05:44:40.802213 4706 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 06 05:44:40 crc kubenswrapper[4706]: E1206 05:44:40.802235 4706 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 06 05:44:40 crc kubenswrapper[4706]: E1206 05:44:40.802285 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift podName:91f74906-ec70-4b0c-a657-d075d18f488b nodeName:}" failed. No retries permitted until 2025-12-06 05:44:44.802266856 +0000 UTC m=+1507.130090800 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift") pod "swift-storage-0" (UID: "91f74906-ec70-4b0c-a657-d075d18f488b") : configmap "swift-ring-files" not found Dec 06 05:44:40 crc kubenswrapper[4706]: I1206 05:44:40.893548 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-nmtq6"] Dec 06 05:44:40 crc kubenswrapper[4706]: I1206 05:44:40.894827 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:40 crc kubenswrapper[4706]: I1206 05:44:40.897356 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Dec 06 05:44:40 crc kubenswrapper[4706]: I1206 05:44:40.897461 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Dec 06 05:44:40 crc kubenswrapper[4706]: I1206 05:44:40.897475 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Dec 06 05:44:40 crc kubenswrapper[4706]: I1206 05:44:40.927847 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-9pw6t"] Dec 06 05:44:40 crc kubenswrapper[4706]: I1206 05:44:40.928976 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:40 crc kubenswrapper[4706]: I1206 05:44:40.933689 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-nmtq6"] Dec 06 05:44:40 crc kubenswrapper[4706]: E1206 05:44:40.934241 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-bcs5g ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/swift-ring-rebalance-nmtq6" podUID="fa73ba50-674d-406c-ad62-1470d1c9e64a" Dec 06 05:44:40 crc kubenswrapper[4706]: I1206 05:44:40.948115 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-9pw6t"] Dec 06 05:44:40 crc kubenswrapper[4706]: I1206 05:44:40.956368 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-nmtq6"] Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.005209 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/fa73ba50-674d-406c-ad62-1470d1c9e64a-dispersionconf\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.005252 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/fa73ba50-674d-406c-ad62-1470d1c9e64a-swiftconf\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.005304 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcs5g\" (UniqueName: \"kubernetes.io/projected/fa73ba50-674d-406c-ad62-1470d1c9e64a-kube-api-access-bcs5g\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.005339 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/fa73ba50-674d-406c-ad62-1470d1c9e64a-etc-swift\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.005607 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fa73ba50-674d-406c-ad62-1470d1c9e64a-scripts\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.005961 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/fa73ba50-674d-406c-ad62-1470d1c9e64a-ring-data-devices\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.006239 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa73ba50-674d-406c-ad62-1470d1c9e64a-combined-ca-bundle\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.107531 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/fa73ba50-674d-406c-ad62-1470d1c9e64a-swiftconf\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.107613 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/abd1400e-de80-48fe-bad4-3e3c3af98355-scripts\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.107664 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/abd1400e-de80-48fe-bad4-3e3c3af98355-dispersionconf\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.107687 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/abd1400e-de80-48fe-bad4-3e3c3af98355-swiftconf\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.107717 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcs5g\" (UniqueName: \"kubernetes.io/projected/fa73ba50-674d-406c-ad62-1470d1c9e64a-kube-api-access-bcs5g\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.107759 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7dsrw\" (UniqueName: \"kubernetes.io/projected/abd1400e-de80-48fe-bad4-3e3c3af98355-kube-api-access-7dsrw\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.107784 4706 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/fa73ba50-674d-406c-ad62-1470d1c9e64a-etc-swift\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.107812 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/abd1400e-de80-48fe-bad4-3e3c3af98355-ring-data-devices\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.107838 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fa73ba50-674d-406c-ad62-1470d1c9e64a-scripts\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.107914 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/fa73ba50-674d-406c-ad62-1470d1c9e64a-ring-data-devices\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.107949 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa73ba50-674d-406c-ad62-1470d1c9e64a-combined-ca-bundle\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.107989 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/abd1400e-de80-48fe-bad4-3e3c3af98355-etc-swift\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.108012 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abd1400e-de80-48fe-bad4-3e3c3af98355-combined-ca-bundle\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.108035 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/fa73ba50-674d-406c-ad62-1470d1c9e64a-dispersionconf\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.108168 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/fa73ba50-674d-406c-ad62-1470d1c9e64a-etc-swift\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.108703 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" 
(UniqueName: \"kubernetes.io/configmap/fa73ba50-674d-406c-ad62-1470d1c9e64a-ring-data-devices\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.108750 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fa73ba50-674d-406c-ad62-1470d1c9e64a-scripts\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.112575 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/fa73ba50-674d-406c-ad62-1470d1c9e64a-swiftconf\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.114222 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/fa73ba50-674d-406c-ad62-1470d1c9e64a-dispersionconf\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.120370 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa73ba50-674d-406c-ad62-1470d1c9e64a-combined-ca-bundle\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.133420 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcs5g\" (UniqueName: \"kubernetes.io/projected/fa73ba50-674d-406c-ad62-1470d1c9e64a-kube-api-access-bcs5g\") pod \"swift-ring-rebalance-nmtq6\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.209291 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/abd1400e-de80-48fe-bad4-3e3c3af98355-scripts\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.209379 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/abd1400e-de80-48fe-bad4-3e3c3af98355-dispersionconf\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.209410 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/abd1400e-de80-48fe-bad4-3e3c3af98355-swiftconf\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.209452 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7dsrw\" (UniqueName: \"kubernetes.io/projected/abd1400e-de80-48fe-bad4-3e3c3af98355-kube-api-access-7dsrw\") pod \"swift-ring-rebalance-9pw6t\" (UID: 
\"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.209487 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/abd1400e-de80-48fe-bad4-3e3c3af98355-ring-data-devices\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.209595 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/abd1400e-de80-48fe-bad4-3e3c3af98355-etc-swift\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.209620 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abd1400e-de80-48fe-bad4-3e3c3af98355-combined-ca-bundle\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.210958 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/abd1400e-de80-48fe-bad4-3e3c3af98355-ring-data-devices\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.211266 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/abd1400e-de80-48fe-bad4-3e3c3af98355-etc-swift\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.211427 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/abd1400e-de80-48fe-bad4-3e3c3af98355-scripts\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.213798 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/abd1400e-de80-48fe-bad4-3e3c3af98355-dispersionconf\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.213796 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abd1400e-de80-48fe-bad4-3e3c3af98355-combined-ca-bundle\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.214676 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/abd1400e-de80-48fe-bad4-3e3c3af98355-swiftconf\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.230137 4706 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7dsrw\" (UniqueName: \"kubernetes.io/projected/abd1400e-de80-48fe-bad4-3e3c3af98355-kube-api-access-7dsrw\") pod \"swift-ring-rebalance-9pw6t\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.244151 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-mm5ld" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.253462 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.671197 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-9pw6t"] Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.730834 4706 generic.go:334] "Generic (PLEG): container finished" podID="08955916-6689-445e-830d-6fbfe9a2f460" containerID="d79c59ea7e9b3863c5d6b039c9b53aaf868fd1f81312f63d56ef25a4420b4eb5" exitCode=0 Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.730911 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"08955916-6689-445e-830d-6fbfe9a2f460","Type":"ContainerDied","Data":"d79c59ea7e9b3863c5d6b039c9b53aaf868fd1f81312f63d56ef25a4420b4eb5"} Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.734497 4706 generic.go:334] "Generic (PLEG): container finished" podID="74e1bb57-a746-472b-a3b1-ffb875c658e4" containerID="ed85421b9374d286813940bfff9a907a2411a7395688f1b9447bd8f7a5134baa" exitCode=0 Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.734599 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"74e1bb57-a746-472b-a3b1-ffb875c658e4","Type":"ContainerDied","Data":"ed85421b9374d286813940bfff9a907a2411a7395688f1b9447bd8f7a5134baa"} Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.736572 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-9pw6t" event={"ID":"abd1400e-de80-48fe-bad4-3e3c3af98355","Type":"ContainerStarted","Data":"74a34cbe66ca0c138b5ddc996007956679926e496790cb0d58ec0c8eda610c69"} Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.736744 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.752963 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.920594 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bcs5g\" (UniqueName: \"kubernetes.io/projected/fa73ba50-674d-406c-ad62-1470d1c9e64a-kube-api-access-bcs5g\") pod \"fa73ba50-674d-406c-ad62-1470d1c9e64a\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.920687 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/fa73ba50-674d-406c-ad62-1470d1c9e64a-ring-data-devices\") pod \"fa73ba50-674d-406c-ad62-1470d1c9e64a\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.920712 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/fa73ba50-674d-406c-ad62-1470d1c9e64a-dispersionconf\") pod \"fa73ba50-674d-406c-ad62-1470d1c9e64a\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.920793 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa73ba50-674d-406c-ad62-1470d1c9e64a-combined-ca-bundle\") pod \"fa73ba50-674d-406c-ad62-1470d1c9e64a\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.920812 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/fa73ba50-674d-406c-ad62-1470d1c9e64a-swiftconf\") pod \"fa73ba50-674d-406c-ad62-1470d1c9e64a\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.920861 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/fa73ba50-674d-406c-ad62-1470d1c9e64a-etc-swift\") pod \"fa73ba50-674d-406c-ad62-1470d1c9e64a\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.920902 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fa73ba50-674d-406c-ad62-1470d1c9e64a-scripts\") pod \"fa73ba50-674d-406c-ad62-1470d1c9e64a\" (UID: \"fa73ba50-674d-406c-ad62-1470d1c9e64a\") " Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.921388 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa73ba50-674d-406c-ad62-1470d1c9e64a-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "fa73ba50-674d-406c-ad62-1470d1c9e64a" (UID: "fa73ba50-674d-406c-ad62-1470d1c9e64a"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.921548 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa73ba50-674d-406c-ad62-1470d1c9e64a-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "fa73ba50-674d-406c-ad62-1470d1c9e64a" (UID: "fa73ba50-674d-406c-ad62-1470d1c9e64a"). InnerVolumeSpecName "ring-data-devices". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.921648 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa73ba50-674d-406c-ad62-1470d1c9e64a-scripts" (OuterVolumeSpecName: "scripts") pod "fa73ba50-674d-406c-ad62-1470d1c9e64a" (UID: "fa73ba50-674d-406c-ad62-1470d1c9e64a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.922700 4706 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/fa73ba50-674d-406c-ad62-1470d1c9e64a-etc-swift\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.922721 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fa73ba50-674d-406c-ad62-1470d1c9e64a-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.922731 4706 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/fa73ba50-674d-406c-ad62-1470d1c9e64a-ring-data-devices\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.924699 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa73ba50-674d-406c-ad62-1470d1c9e64a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fa73ba50-674d-406c-ad62-1470d1c9e64a" (UID: "fa73ba50-674d-406c-ad62-1470d1c9e64a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.924812 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa73ba50-674d-406c-ad62-1470d1c9e64a-kube-api-access-bcs5g" (OuterVolumeSpecName: "kube-api-access-bcs5g") pod "fa73ba50-674d-406c-ad62-1470d1c9e64a" (UID: "fa73ba50-674d-406c-ad62-1470d1c9e64a"). InnerVolumeSpecName "kube-api-access-bcs5g". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.925770 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa73ba50-674d-406c-ad62-1470d1c9e64a-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "fa73ba50-674d-406c-ad62-1470d1c9e64a" (UID: "fa73ba50-674d-406c-ad62-1470d1c9e64a"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:44:41 crc kubenswrapper[4706]: I1206 05:44:41.926229 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa73ba50-674d-406c-ad62-1470d1c9e64a-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "fa73ba50-674d-406c-ad62-1470d1c9e64a" (UID: "fa73ba50-674d-406c-ad62-1470d1c9e64a"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:44:42 crc kubenswrapper[4706]: I1206 05:44:42.024424 4706 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/fa73ba50-674d-406c-ad62-1470d1c9e64a-dispersionconf\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:42 crc kubenswrapper[4706]: I1206 05:44:42.024467 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa73ba50-674d-406c-ad62-1470d1c9e64a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:42 crc kubenswrapper[4706]: I1206 05:44:42.024480 4706 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/fa73ba50-674d-406c-ad62-1470d1c9e64a-swiftconf\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:42 crc kubenswrapper[4706]: I1206 05:44:42.024492 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bcs5g\" (UniqueName: \"kubernetes.io/projected/fa73ba50-674d-406c-ad62-1470d1c9e64a-kube-api-access-bcs5g\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:42 crc kubenswrapper[4706]: I1206 05:44:42.765163 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"08955916-6689-445e-830d-6fbfe9a2f460","Type":"ContainerStarted","Data":"48e77aab51327a9240c0349d2e1b8a051e404854caeae0a4adeaad149873bab3"} Dec 06 05:44:42 crc kubenswrapper[4706]: I1206 05:44:42.766948 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-nmtq6" Dec 06 05:44:42 crc kubenswrapper[4706]: I1206 05:44:42.767556 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"74e1bb57-a746-472b-a3b1-ffb875c658e4","Type":"ContainerStarted","Data":"f3c4703de5a7bb72456c38ce2bb973f32df38890397051aef1d9a4a594d3dfa0"} Dec 06 05:44:42 crc kubenswrapper[4706]: I1206 05:44:42.793782 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=-9223371976.061012 podStartE2EDuration="1m0.793762602s" podCreationTimestamp="2025-12-06 05:43:42 +0000 UTC" firstStartedPulling="2025-12-06 05:43:55.762515804 +0000 UTC m=+1458.090339738" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:44:42.785719504 +0000 UTC m=+1505.113543448" watchObservedRunningTime="2025-12-06 05:44:42.793762602 +0000 UTC m=+1505.121586676" Dec 06 05:44:42 crc kubenswrapper[4706]: I1206 05:44:42.819406 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=35.724236732 podStartE2EDuration="1m2.819388755s" podCreationTimestamp="2025-12-06 05:43:40 +0000 UTC" firstStartedPulling="2025-12-06 05:43:55.75679719 +0000 UTC m=+1458.084621134" lastFinishedPulling="2025-12-06 05:44:22.851949213 +0000 UTC m=+1485.179773157" observedRunningTime="2025-12-06 05:44:42.81296637 +0000 UTC m=+1505.140790324" watchObservedRunningTime="2025-12-06 05:44:42.819388755 +0000 UTC m=+1505.147212699" Dec 06 05:44:42 crc kubenswrapper[4706]: I1206 05:44:42.862341 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-nmtq6"] Dec 06 05:44:42 crc kubenswrapper[4706]: I1206 05:44:42.868436 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-nmtq6"] Dec 06 05:44:43 crc kubenswrapper[4706]: I1206 05:44:43.525715 4706 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Dec 06 05:44:43 crc kubenswrapper[4706]: I1206 05:44:43.526065 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Dec 06 05:44:44 crc kubenswrapper[4706]: I1206 05:44:44.048837 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa73ba50-674d-406c-ad62-1470d1c9e64a" path="/var/lib/kubelet/pods/fa73ba50-674d-406c-ad62-1470d1c9e64a/volumes" Dec 06 05:44:44 crc kubenswrapper[4706]: I1206 05:44:44.291719 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:44:44 crc kubenswrapper[4706]: I1206 05:44:44.566358 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-cbrg2" podUID="cde7e1a3-dd72-47aa-a0b5-117bc2c53885" containerName="ovn-controller" probeResult="failure" output=< Dec 06 05:44:44 crc kubenswrapper[4706]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Dec 06 05:44:44 crc kubenswrapper[4706]: > Dec 06 05:44:44 crc kubenswrapper[4706]: I1206 05:44:44.870761 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0" Dec 06 05:44:44 crc kubenswrapper[4706]: E1206 05:44:44.870915 4706 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 06 05:44:44 crc kubenswrapper[4706]: E1206 05:44:44.870929 4706 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 06 05:44:44 crc kubenswrapper[4706]: E1206 05:44:44.870968 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift podName:91f74906-ec70-4b0c-a657-d075d18f488b nodeName:}" failed. No retries permitted until 2025-12-06 05:44:52.870954954 +0000 UTC m=+1515.198778888 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift") pod "swift-storage-0" (UID: "91f74906-ec70-4b0c-a657-d075d18f488b") : configmap "swift-ring-files" not found Dec 06 05:44:45 crc kubenswrapper[4706]: I1206 05:44:45.790761 4706 generic.go:334] "Generic (PLEG): container finished" podID="bfd60e65-9bee-4772-bbd5-b6d64a5a225c" containerID="59ea841a87bb87aa6d7b186eaa2155dbd28a5d718db5af4f41b422fa2c8ac0c7" exitCode=0 Dec 06 05:44:45 crc kubenswrapper[4706]: I1206 05:44:45.790839 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"bfd60e65-9bee-4772-bbd5-b6d64a5a225c","Type":"ContainerDied","Data":"59ea841a87bb87aa6d7b186eaa2155dbd28a5d718db5af4f41b422fa2c8ac0c7"} Dec 06 05:44:46 crc kubenswrapper[4706]: I1206 05:44:46.047829 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-8p245" Dec 06 05:44:46 crc kubenswrapper[4706]: I1206 05:44:46.096076 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-hq96d"] Dec 06 05:44:46 crc kubenswrapper[4706]: I1206 05:44:46.097090 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" podUID="d16dce76-ff24-4e18-86ee-872fcf90ee0c" containerName="dnsmasq-dns" containerID="cri-o://9dbd8668a18d8149257527904fb2c3a30d8d0402319635407a5c117a58888f42" gracePeriod=10 Dec 06 05:44:48 crc kubenswrapper[4706]: I1206 05:44:48.606754 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" podUID="d16dce76-ff24-4e18-86ee-872fcf90ee0c" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.106:5353: connect: connection refused" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.275029 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-cj4kx" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.399083 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.492737 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-cbrg2-config-j4nqj"] Dec 06 05:44:49 crc kubenswrapper[4706]: E1206 05:44:49.496360 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d16dce76-ff24-4e18-86ee-872fcf90ee0c" containerName="init" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.496400 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="d16dce76-ff24-4e18-86ee-872fcf90ee0c" containerName="init" Dec 06 05:44:49 crc kubenswrapper[4706]: E1206 05:44:49.496429 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d16dce76-ff24-4e18-86ee-872fcf90ee0c" containerName="dnsmasq-dns" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.496439 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="d16dce76-ff24-4e18-86ee-872fcf90ee0c" containerName="dnsmasq-dns" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.496646 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="d16dce76-ff24-4e18-86ee-872fcf90ee0c" containerName="dnsmasq-dns" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.497505 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.499523 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.509280 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-cbrg2-config-j4nqj"] Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.547292 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6f8nm\" (UniqueName: \"kubernetes.io/projected/d16dce76-ff24-4e18-86ee-872fcf90ee0c-kube-api-access-6f8nm\") pod \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.548471 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-dns-svc\") pod \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.548555 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-config\") pod \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.548584 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-ovsdbserver-nb\") pod \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.548654 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-ovsdbserver-sb\") pod \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\" (UID: \"d16dce76-ff24-4e18-86ee-872fcf90ee0c\") " Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.566274 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d16dce76-ff24-4e18-86ee-872fcf90ee0c-kube-api-access-6f8nm" (OuterVolumeSpecName: "kube-api-access-6f8nm") pod "d16dce76-ff24-4e18-86ee-872fcf90ee0c" (UID: "d16dce76-ff24-4e18-86ee-872fcf90ee0c"). InnerVolumeSpecName "kube-api-access-6f8nm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.583185 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-cbrg2" podUID="cde7e1a3-dd72-47aa-a0b5-117bc2c53885" containerName="ovn-controller" probeResult="failure" output=< Dec 06 05:44:49 crc kubenswrapper[4706]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Dec 06 05:44:49 crc kubenswrapper[4706]: > Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.592488 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-config" (OuterVolumeSpecName: "config") pod "d16dce76-ff24-4e18-86ee-872fcf90ee0c" (UID: "d16dce76-ff24-4e18-86ee-872fcf90ee0c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.603810 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d16dce76-ff24-4e18-86ee-872fcf90ee0c" (UID: "d16dce76-ff24-4e18-86ee-872fcf90ee0c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.607497 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d16dce76-ff24-4e18-86ee-872fcf90ee0c" (UID: "d16dce76-ff24-4e18-86ee-872fcf90ee0c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.619766 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d16dce76-ff24-4e18-86ee-872fcf90ee0c" (UID: "d16dce76-ff24-4e18-86ee-872fcf90ee0c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.650964 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-var-run-ovn\") pod \"ovn-controller-cbrg2-config-j4nqj\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.651016 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-scripts\") pod \"ovn-controller-cbrg2-config-j4nqj\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.651132 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-additional-scripts\") pod \"ovn-controller-cbrg2-config-j4nqj\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.651164 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-var-run\") pod \"ovn-controller-cbrg2-config-j4nqj\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.651438 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxnbp\" (UniqueName: \"kubernetes.io/projected/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-kube-api-access-dxnbp\") pod \"ovn-controller-cbrg2-config-j4nqj\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.651495 4706 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-var-log-ovn\") pod \"ovn-controller-cbrg2-config-j4nqj\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.651626 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.651653 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6f8nm\" (UniqueName: \"kubernetes.io/projected/d16dce76-ff24-4e18-86ee-872fcf90ee0c-kube-api-access-6f8nm\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.651670 4706 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.651684 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.651696 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d16dce76-ff24-4e18-86ee-872fcf90ee0c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.752895 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-var-run\") pod \"ovn-controller-cbrg2-config-j4nqj\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.753025 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxnbp\" (UniqueName: \"kubernetes.io/projected/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-kube-api-access-dxnbp\") pod \"ovn-controller-cbrg2-config-j4nqj\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.753094 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-var-log-ovn\") pod \"ovn-controller-cbrg2-config-j4nqj\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.753157 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-var-run-ovn\") pod \"ovn-controller-cbrg2-config-j4nqj\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.753183 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-scripts\") pod \"ovn-controller-cbrg2-config-j4nqj\" (UID: 
\"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.753279 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-additional-scripts\") pod \"ovn-controller-cbrg2-config-j4nqj\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.753284 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-var-run\") pod \"ovn-controller-cbrg2-config-j4nqj\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.753440 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-var-run-ovn\") pod \"ovn-controller-cbrg2-config-j4nqj\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.753860 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-var-log-ovn\") pod \"ovn-controller-cbrg2-config-j4nqj\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.754272 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-additional-scripts\") pod \"ovn-controller-cbrg2-config-j4nqj\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.755196 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-scripts\") pod \"ovn-controller-cbrg2-config-j4nqj\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.773540 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxnbp\" (UniqueName: \"kubernetes.io/projected/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-kube-api-access-dxnbp\") pod \"ovn-controller-cbrg2-config-j4nqj\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.811734 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.832197 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-9pw6t" event={"ID":"abd1400e-de80-48fe-bad4-3e3c3af98355","Type":"ContainerStarted","Data":"efe342cdb1e711cf1ae532d8f7b4a4afacb23f52f73e3723126cde43ef76769d"} Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.835988 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"bfd60e65-9bee-4772-bbd5-b6d64a5a225c","Type":"ContainerStarted","Data":"5969ecbb31882bd14416f895bd2e15ae1d88ac9c6fa0fe23318ae5bee33e8892"} Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.836268 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.851855 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-9pw6t" podStartSLOduration=6.632478418 podStartE2EDuration="9.85183856s" podCreationTimestamp="2025-12-06 05:44:40 +0000 UTC" firstStartedPulling="2025-12-06 05:44:41.675110338 +0000 UTC m=+1504.002934302" lastFinishedPulling="2025-12-06 05:44:44.8944705 +0000 UTC m=+1507.222294444" observedRunningTime="2025-12-06 05:44:49.847654557 +0000 UTC m=+1512.175478501" watchObservedRunningTime="2025-12-06 05:44:49.85183856 +0000 UTC m=+1512.179662504" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.852127 4706 generic.go:334] "Generic (PLEG): container finished" podID="d16dce76-ff24-4e18-86ee-872fcf90ee0c" containerID="9dbd8668a18d8149257527904fb2c3a30d8d0402319635407a5c117a58888f42" exitCode=0 Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.852174 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.852180 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" event={"ID":"d16dce76-ff24-4e18-86ee-872fcf90ee0c","Type":"ContainerDied","Data":"9dbd8668a18d8149257527904fb2c3a30d8d0402319635407a5c117a58888f42"} Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.852337 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-hq96d" event={"ID":"d16dce76-ff24-4e18-86ee-872fcf90ee0c","Type":"ContainerDied","Data":"efac256db2e845286e058f14b46ecdc2ca9ac6a7047318658dbebe9135d021cf"} Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.852381 4706 scope.go:117] "RemoveContainer" containerID="9dbd8668a18d8149257527904fb2c3a30d8d0402319635407a5c117a58888f42" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.878029 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=55.62635825 podStartE2EDuration="1m10.878014347s" podCreationTimestamp="2025-12-06 05:43:39 +0000 UTC" firstStartedPulling="2025-12-06 05:43:55.636859925 +0000 UTC m=+1457.964683869" lastFinishedPulling="2025-12-06 05:44:10.888516022 +0000 UTC m=+1473.216339966" observedRunningTime="2025-12-06 05:44:49.871724817 +0000 UTC m=+1512.199548761" watchObservedRunningTime="2025-12-06 05:44:49.878014347 +0000 UTC m=+1512.205838291" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.890481 4706 scope.go:117] "RemoveContainer" containerID="18ef681464b65e863fb37b8c18c8e2e58765e7e865b3f0db33f3283e3b5ffc27" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.891212 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-hq96d"] Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.904772 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-hq96d"] Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.922889 4706 scope.go:117] "RemoveContainer" containerID="9dbd8668a18d8149257527904fb2c3a30d8d0402319635407a5c117a58888f42" Dec 06 05:44:49 crc kubenswrapper[4706]: E1206 05:44:49.923352 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9dbd8668a18d8149257527904fb2c3a30d8d0402319635407a5c117a58888f42\": container with ID starting with 9dbd8668a18d8149257527904fb2c3a30d8d0402319635407a5c117a58888f42 not found: ID does not exist" containerID="9dbd8668a18d8149257527904fb2c3a30d8d0402319635407a5c117a58888f42" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.923408 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9dbd8668a18d8149257527904fb2c3a30d8d0402319635407a5c117a58888f42"} err="failed to get container status \"9dbd8668a18d8149257527904fb2c3a30d8d0402319635407a5c117a58888f42\": rpc error: code = NotFound desc = could not find container \"9dbd8668a18d8149257527904fb2c3a30d8d0402319635407a5c117a58888f42\": container with ID starting with 9dbd8668a18d8149257527904fb2c3a30d8d0402319635407a5c117a58888f42 not found: ID does not exist" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.923439 4706 scope.go:117] "RemoveContainer" containerID="18ef681464b65e863fb37b8c18c8e2e58765e7e865b3f0db33f3283e3b5ffc27" Dec 06 05:44:49 crc kubenswrapper[4706]: E1206 05:44:49.923716 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: 
code = NotFound desc = could not find container \"18ef681464b65e863fb37b8c18c8e2e58765e7e865b3f0db33f3283e3b5ffc27\": container with ID starting with 18ef681464b65e863fb37b8c18c8e2e58765e7e865b3f0db33f3283e3b5ffc27 not found: ID does not exist" containerID="18ef681464b65e863fb37b8c18c8e2e58765e7e865b3f0db33f3283e3b5ffc27" Dec 06 05:44:49 crc kubenswrapper[4706]: I1206 05:44:49.923746 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18ef681464b65e863fb37b8c18c8e2e58765e7e865b3f0db33f3283e3b5ffc27"} err="failed to get container status \"18ef681464b65e863fb37b8c18c8e2e58765e7e865b3f0db33f3283e3b5ffc27\": rpc error: code = NotFound desc = could not find container \"18ef681464b65e863fb37b8c18c8e2e58765e7e865b3f0db33f3283e3b5ffc27\": container with ID starting with 18ef681464b65e863fb37b8c18c8e2e58765e7e865b3f0db33f3283e3b5ffc27 not found: ID does not exist" Dec 06 05:44:50 crc kubenswrapper[4706]: I1206 05:44:50.045960 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d16dce76-ff24-4e18-86ee-872fcf90ee0c" path="/var/lib/kubelet/pods/d16dce76-ff24-4e18-86ee-872fcf90ee0c/volumes" Dec 06 05:44:50 crc kubenswrapper[4706]: I1206 05:44:50.296717 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-cbrg2-config-j4nqj"] Dec 06 05:44:50 crc kubenswrapper[4706]: W1206 05:44:50.297538 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1ed871f7_cfda_4ffb_9337_39f3cd53b4ba.slice/crio-76a4939548187d0c009f5866e0cdc9352b8a3312ce31b84c490477c318dfbc3c WatchSource:0}: Error finding container 76a4939548187d0c009f5866e0cdc9352b8a3312ce31b84c490477c318dfbc3c: Status 404 returned error can't find the container with id 76a4939548187d0c009f5866e0cdc9352b8a3312ce31b84c490477c318dfbc3c Dec 06 05:44:50 crc kubenswrapper[4706]: I1206 05:44:50.862794 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-cbrg2-config-j4nqj" event={"ID":"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba","Type":"ContainerStarted","Data":"7f3ee022e9a79da46433f655c4dae55bf7fa9d1fab50cb443202634fd1b1843e"} Dec 06 05:44:50 crc kubenswrapper[4706]: I1206 05:44:50.863242 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-cbrg2-config-j4nqj" event={"ID":"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba","Type":"ContainerStarted","Data":"76a4939548187d0c009f5866e0cdc9352b8a3312ce31b84c490477c318dfbc3c"} Dec 06 05:44:50 crc kubenswrapper[4706]: I1206 05:44:50.880494 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-cbrg2-config-j4nqj" podStartSLOduration=1.880476687 podStartE2EDuration="1.880476687s" podCreationTimestamp="2025-12-06 05:44:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:44:50.878508714 +0000 UTC m=+1513.206332658" watchObservedRunningTime="2025-12-06 05:44:50.880476687 +0000 UTC m=+1513.208300631" Dec 06 05:44:51 crc kubenswrapper[4706]: I1206 05:44:51.870488 4706 generic.go:334] "Generic (PLEG): container finished" podID="1ed871f7-cfda-4ffb-9337-39f3cd53b4ba" containerID="7f3ee022e9a79da46433f655c4dae55bf7fa9d1fab50cb443202634fd1b1843e" exitCode=0 Dec 06 05:44:51 crc kubenswrapper[4706]: I1206 05:44:51.870553 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-cbrg2-config-j4nqj" 
event={"ID":"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba","Type":"ContainerDied","Data":"7f3ee022e9a79da46433f655c4dae55bf7fa9d1fab50cb443202634fd1b1843e"} Dec 06 05:44:51 crc kubenswrapper[4706]: I1206 05:44:51.960990 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Dec 06 05:44:51 crc kubenswrapper[4706]: I1206 05:44:51.961111 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Dec 06 05:44:52 crc kubenswrapper[4706]: I1206 05:44:52.898642 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0" Dec 06 05:44:52 crc kubenswrapper[4706]: E1206 05:44:52.898854 4706 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Dec 06 05:44:52 crc kubenswrapper[4706]: E1206 05:44:52.899078 4706 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Dec 06 05:44:52 crc kubenswrapper[4706]: E1206 05:44:52.899159 4706 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift podName:91f74906-ec70-4b0c-a657-d075d18f488b nodeName:}" failed. No retries permitted until 2025-12-06 05:45:08.899134897 +0000 UTC m=+1531.226958881 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift") pod "swift-storage-0" (UID: "91f74906-ec70-4b0c-a657-d075d18f488b") : configmap "swift-ring-files" not found Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.253714 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.305876 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-additional-scripts\") pod \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.305946 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-var-run\") pod \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.306025 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-var-log-ovn\") pod \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.306071 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-var-run-ovn\") pod \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.306120 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-scripts\") pod \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.306165 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dxnbp\" (UniqueName: \"kubernetes.io/projected/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-kube-api-access-dxnbp\") pod \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\" (UID: \"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba\") " Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.306356 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "1ed871f7-cfda-4ffb-9337-39f3cd53b4ba" (UID: "1ed871f7-cfda-4ffb-9337-39f3cd53b4ba"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.306373 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "1ed871f7-cfda-4ffb-9337-39f3cd53b4ba" (UID: "1ed871f7-cfda-4ffb-9337-39f3cd53b4ba"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.306390 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-var-run" (OuterVolumeSpecName: "var-run") pod "1ed871f7-cfda-4ffb-9337-39f3cd53b4ba" (UID: "1ed871f7-cfda-4ffb-9337-39f3cd53b4ba"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.306749 4706 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-var-run\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.306773 4706 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-var-log-ovn\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.306785 4706 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-var-run-ovn\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.307104 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "1ed871f7-cfda-4ffb-9337-39f3cd53b4ba" (UID: "1ed871f7-cfda-4ffb-9337-39f3cd53b4ba"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.307314 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-scripts" (OuterVolumeSpecName: "scripts") pod "1ed871f7-cfda-4ffb-9337-39f3cd53b4ba" (UID: "1ed871f7-cfda-4ffb-9337-39f3cd53b4ba"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.323316 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-kube-api-access-dxnbp" (OuterVolumeSpecName: "kube-api-access-dxnbp") pod "1ed871f7-cfda-4ffb-9337-39f3cd53b4ba" (UID: "1ed871f7-cfda-4ffb-9337-39f3cd53b4ba"). InnerVolumeSpecName "kube-api-access-dxnbp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.408912 4706 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-additional-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.408977 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.408996 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dxnbp\" (UniqueName: \"kubernetes.io/projected/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba-kube-api-access-dxnbp\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.892915 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-cbrg2-config-j4nqj" event={"ID":"1ed871f7-cfda-4ffb-9337-39f3cd53b4ba","Type":"ContainerDied","Data":"76a4939548187d0c009f5866e0cdc9352b8a3312ce31b84c490477c318dfbc3c"} Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.892966 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76a4939548187d0c009f5866e0cdc9352b8a3312ce31b84c490477c318dfbc3c" Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.893023 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-cbrg2-config-j4nqj" Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.983469 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-cbrg2-config-j4nqj"] Dec 06 05:44:53 crc kubenswrapper[4706]: I1206 05:44:53.989847 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-cbrg2-config-j4nqj"] Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.045362 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ed871f7-cfda-4ffb-9337-39f3cd53b4ba" path="/var/lib/kubelet/pods/1ed871f7-cfda-4ffb-9337-39f3cd53b4ba/volumes" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.090418 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-cbrg2-config-fws8p"] Dec 06 05:44:54 crc kubenswrapper[4706]: E1206 05:44:54.090848 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ed871f7-cfda-4ffb-9337-39f3cd53b4ba" containerName="ovn-config" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.090870 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ed871f7-cfda-4ffb-9337-39f3cd53b4ba" containerName="ovn-config" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.091105 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ed871f7-cfda-4ffb-9337-39f3cd53b4ba" containerName="ovn-config" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.091739 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.094278 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.112944 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-cbrg2-config-fws8p"] Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.143252 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.222841 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-scripts\") pod \"ovn-controller-cbrg2-config-fws8p\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.223007 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-additional-scripts\") pod \"ovn-controller-cbrg2-config-fws8p\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.223135 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-var-run\") pod \"ovn-controller-cbrg2-config-fws8p\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.223181 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-var-log-ovn\") pod \"ovn-controller-cbrg2-config-fws8p\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.223315 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zw8g5\" (UniqueName: \"kubernetes.io/projected/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-kube-api-access-zw8g5\") pod \"ovn-controller-cbrg2-config-fws8p\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.223344 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-var-run-ovn\") pod \"ovn-controller-cbrg2-config-fws8p\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.244764 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.325117 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zw8g5\" (UniqueName: \"kubernetes.io/projected/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-kube-api-access-zw8g5\") pod 
\"ovn-controller-cbrg2-config-fws8p\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.325162 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-var-run-ovn\") pod \"ovn-controller-cbrg2-config-fws8p\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.325216 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-scripts\") pod \"ovn-controller-cbrg2-config-fws8p\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.325277 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-additional-scripts\") pod \"ovn-controller-cbrg2-config-fws8p\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.325297 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-var-run\") pod \"ovn-controller-cbrg2-config-fws8p\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.325322 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-var-log-ovn\") pod \"ovn-controller-cbrg2-config-fws8p\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.325918 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-var-log-ovn\") pod \"ovn-controller-cbrg2-config-fws8p\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.326253 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-var-run\") pod \"ovn-controller-cbrg2-config-fws8p\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.326355 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-var-run-ovn\") pod \"ovn-controller-cbrg2-config-fws8p\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.326668 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-additional-scripts\") pod \"ovn-controller-cbrg2-config-fws8p\" (UID: 
\"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.328094 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-scripts\") pod \"ovn-controller-cbrg2-config-fws8p\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.354087 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zw8g5\" (UniqueName: \"kubernetes.io/projected/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-kube-api-access-zw8g5\") pod \"ovn-controller-cbrg2-config-fws8p\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.405996 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.585635 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-cbrg2" Dec 06 05:44:54 crc kubenswrapper[4706]: I1206 05:44:54.957715 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-cbrg2-config-fws8p"] Dec 06 05:44:55 crc kubenswrapper[4706]: I1206 05:44:55.657090 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Dec 06 05:44:55 crc kubenswrapper[4706]: I1206 05:44:55.736762 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Dec 06 05:44:55 crc kubenswrapper[4706]: I1206 05:44:55.911040 4706 generic.go:334] "Generic (PLEG): container finished" podID="abd1400e-de80-48fe-bad4-3e3c3af98355" containerID="efe342cdb1e711cf1ae532d8f7b4a4afacb23f52f73e3723126cde43ef76769d" exitCode=0 Dec 06 05:44:55 crc kubenswrapper[4706]: I1206 05:44:55.911130 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-9pw6t" event={"ID":"abd1400e-de80-48fe-bad4-3e3c3af98355","Type":"ContainerDied","Data":"efe342cdb1e711cf1ae532d8f7b4a4afacb23f52f73e3723126cde43ef76769d"} Dec 06 05:44:55 crc kubenswrapper[4706]: I1206 05:44:55.912830 4706 generic.go:334] "Generic (PLEG): container finished" podID="d3a6858a-513f-4844-a1aa-0d9ad98d24fb" containerID="425ea330c8587bf3972d00f9fd843e2e23e46558268c656e08fcaddbc59d5808" exitCode=0 Dec 06 05:44:55 crc kubenswrapper[4706]: I1206 05:44:55.913420 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-cbrg2-config-fws8p" event={"ID":"d3a6858a-513f-4844-a1aa-0d9ad98d24fb","Type":"ContainerDied","Data":"425ea330c8587bf3972d00f9fd843e2e23e46558268c656e08fcaddbc59d5808"} Dec 06 05:44:55 crc kubenswrapper[4706]: I1206 05:44:55.913465 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-cbrg2-config-fws8p" event={"ID":"d3a6858a-513f-4844-a1aa-0d9ad98d24fb","Type":"ContainerStarted","Data":"a1aee67aa4b27a58f5a8ba55146363910008e8c56b87d25678d27c6c3b389f9a"} Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.336039 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.343014 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.486424 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/abd1400e-de80-48fe-bad4-3e3c3af98355-swiftconf\") pod \"abd1400e-de80-48fe-bad4-3e3c3af98355\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.486496 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/abd1400e-de80-48fe-bad4-3e3c3af98355-scripts\") pod \"abd1400e-de80-48fe-bad4-3e3c3af98355\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.486528 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-var-run-ovn\") pod \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.486547 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7dsrw\" (UniqueName: \"kubernetes.io/projected/abd1400e-de80-48fe-bad4-3e3c3af98355-kube-api-access-7dsrw\") pod \"abd1400e-de80-48fe-bad4-3e3c3af98355\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.486592 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-var-log-ovn\") pod \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.486637 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zw8g5\" (UniqueName: \"kubernetes.io/projected/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-kube-api-access-zw8g5\") pod \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.486704 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abd1400e-de80-48fe-bad4-3e3c3af98355-combined-ca-bundle\") pod \"abd1400e-de80-48fe-bad4-3e3c3af98355\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.486730 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/abd1400e-de80-48fe-bad4-3e3c3af98355-ring-data-devices\") pod \"abd1400e-de80-48fe-bad4-3e3c3af98355\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.486746 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-scripts\") pod \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.486763 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-additional-scripts\") pod 
\"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.486780 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-var-run\") pod \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\" (UID: \"d3a6858a-513f-4844-a1aa-0d9ad98d24fb\") " Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.486819 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/abd1400e-de80-48fe-bad4-3e3c3af98355-etc-swift\") pod \"abd1400e-de80-48fe-bad4-3e3c3af98355\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.486858 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/abd1400e-de80-48fe-bad4-3e3c3af98355-dispersionconf\") pod \"abd1400e-de80-48fe-bad4-3e3c3af98355\" (UID: \"abd1400e-de80-48fe-bad4-3e3c3af98355\") " Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.486878 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "d3a6858a-513f-4844-a1aa-0d9ad98d24fb" (UID: "d3a6858a-513f-4844-a1aa-0d9ad98d24fb"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.486969 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "d3a6858a-513f-4844-a1aa-0d9ad98d24fb" (UID: "d3a6858a-513f-4844-a1aa-0d9ad98d24fb"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.487009 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-var-run" (OuterVolumeSpecName: "var-run") pod "d3a6858a-513f-4844-a1aa-0d9ad98d24fb" (UID: "d3a6858a-513f-4844-a1aa-0d9ad98d24fb"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.487533 4706 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-var-run-ovn\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.487553 4706 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-var-log-ovn\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.487565 4706 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-var-run\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.488168 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/abd1400e-de80-48fe-bad4-3e3c3af98355-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "abd1400e-de80-48fe-bad4-3e3c3af98355" (UID: "abd1400e-de80-48fe-bad4-3e3c3af98355"). 
InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.488462 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/abd1400e-de80-48fe-bad4-3e3c3af98355-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "abd1400e-de80-48fe-bad4-3e3c3af98355" (UID: "abd1400e-de80-48fe-bad4-3e3c3af98355"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.488728 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-scripts" (OuterVolumeSpecName: "scripts") pod "d3a6858a-513f-4844-a1aa-0d9ad98d24fb" (UID: "d3a6858a-513f-4844-a1aa-0d9ad98d24fb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.489485 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "d3a6858a-513f-4844-a1aa-0d9ad98d24fb" (UID: "d3a6858a-513f-4844-a1aa-0d9ad98d24fb"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.493980 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abd1400e-de80-48fe-bad4-3e3c3af98355-kube-api-access-7dsrw" (OuterVolumeSpecName: "kube-api-access-7dsrw") pod "abd1400e-de80-48fe-bad4-3e3c3af98355" (UID: "abd1400e-de80-48fe-bad4-3e3c3af98355"). InnerVolumeSpecName "kube-api-access-7dsrw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.494631 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-kube-api-access-zw8g5" (OuterVolumeSpecName: "kube-api-access-zw8g5") pod "d3a6858a-513f-4844-a1aa-0d9ad98d24fb" (UID: "d3a6858a-513f-4844-a1aa-0d9ad98d24fb"). InnerVolumeSpecName "kube-api-access-zw8g5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.501516 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/abd1400e-de80-48fe-bad4-3e3c3af98355-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "abd1400e-de80-48fe-bad4-3e3c3af98355" (UID: "abd1400e-de80-48fe-bad4-3e3c3af98355"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.510932 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/abd1400e-de80-48fe-bad4-3e3c3af98355-scripts" (OuterVolumeSpecName: "scripts") pod "abd1400e-de80-48fe-bad4-3e3c3af98355" (UID: "abd1400e-de80-48fe-bad4-3e3c3af98355"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.514469 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/abd1400e-de80-48fe-bad4-3e3c3af98355-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "abd1400e-de80-48fe-bad4-3e3c3af98355" (UID: "abd1400e-de80-48fe-bad4-3e3c3af98355"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.515824 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/abd1400e-de80-48fe-bad4-3e3c3af98355-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "abd1400e-de80-48fe-bad4-3e3c3af98355" (UID: "abd1400e-de80-48fe-bad4-3e3c3af98355"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.588919 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zw8g5\" (UniqueName: \"kubernetes.io/projected/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-kube-api-access-zw8g5\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.588960 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/abd1400e-de80-48fe-bad4-3e3c3af98355-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.588973 4706 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/abd1400e-de80-48fe-bad4-3e3c3af98355-ring-data-devices\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.588984 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.588992 4706 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d3a6858a-513f-4844-a1aa-0d9ad98d24fb-additional-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.589002 4706 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/abd1400e-de80-48fe-bad4-3e3c3af98355-etc-swift\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.589011 4706 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/abd1400e-de80-48fe-bad4-3e3c3af98355-dispersionconf\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.589020 4706 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/abd1400e-de80-48fe-bad4-3e3c3af98355-swiftconf\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.589029 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/abd1400e-de80-48fe-bad4-3e3c3af98355-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.589038 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7dsrw\" (UniqueName: \"kubernetes.io/projected/abd1400e-de80-48fe-bad4-3e3c3af98355-kube-api-access-7dsrw\") on node \"crc\" DevicePath \"\"" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.932768 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-cbrg2-config-fws8p" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.932811 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-cbrg2-config-fws8p" event={"ID":"d3a6858a-513f-4844-a1aa-0d9ad98d24fb","Type":"ContainerDied","Data":"a1aee67aa4b27a58f5a8ba55146363910008e8c56b87d25678d27c6c3b389f9a"} Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.932931 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a1aee67aa4b27a58f5a8ba55146363910008e8c56b87d25678d27c6c3b389f9a" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.934743 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-9pw6t" event={"ID":"abd1400e-de80-48fe-bad4-3e3c3af98355","Type":"ContainerDied","Data":"74a34cbe66ca0c138b5ddc996007956679926e496790cb0d58ec0c8eda610c69"} Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.934792 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="74a34cbe66ca0c138b5ddc996007956679926e496790cb0d58ec0c8eda610c69" Dec 06 05:44:57 crc kubenswrapper[4706]: I1206 05:44:57.934916 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-9pw6t" Dec 06 05:44:58 crc kubenswrapper[4706]: I1206 05:44:58.426621 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-cbrg2-config-fws8p"] Dec 06 05:44:58 crc kubenswrapper[4706]: I1206 05:44:58.434094 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-cbrg2-config-fws8p"] Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.105165 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-d65f-account-create-update-bpbpn"] Dec 06 05:44:59 crc kubenswrapper[4706]: E1206 05:44:59.106005 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3a6858a-513f-4844-a1aa-0d9ad98d24fb" containerName="ovn-config" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.106259 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3a6858a-513f-4844-a1aa-0d9ad98d24fb" containerName="ovn-config" Dec 06 05:44:59 crc kubenswrapper[4706]: E1206 05:44:59.106450 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abd1400e-de80-48fe-bad4-3e3c3af98355" containerName="swift-ring-rebalance" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.106546 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="abd1400e-de80-48fe-bad4-3e3c3af98355" containerName="swift-ring-rebalance" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.106938 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="abd1400e-de80-48fe-bad4-3e3c3af98355" containerName="swift-ring-rebalance" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.106980 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3a6858a-513f-4844-a1aa-0d9ad98d24fb" containerName="ovn-config" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.107742 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-d65f-account-create-update-bpbpn" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.110295 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.112940 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-d65f-account-create-update-bpbpn"] Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.115074 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6351b3a-0675-4cf0-a1dd-fe6d80cef630-operator-scripts\") pod \"glance-d65f-account-create-update-bpbpn\" (UID: \"f6351b3a-0675-4cf0-a1dd-fe6d80cef630\") " pod="openstack/glance-d65f-account-create-update-bpbpn" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.192310 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-2rfsr"] Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.193393 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2rfsr" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.204527 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-2rfsr"] Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.217359 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0b7d48d-3568-4c7a-909a-210e079a3a1b-operator-scripts\") pod \"glance-db-create-2rfsr\" (UID: \"d0b7d48d-3568-4c7a-909a-210e079a3a1b\") " pod="openstack/glance-db-create-2rfsr" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.217410 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzmmv\" (UniqueName: \"kubernetes.io/projected/f6351b3a-0675-4cf0-a1dd-fe6d80cef630-kube-api-access-mzmmv\") pod \"glance-d65f-account-create-update-bpbpn\" (UID: \"f6351b3a-0675-4cf0-a1dd-fe6d80cef630\") " pod="openstack/glance-d65f-account-create-update-bpbpn" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.217501 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6351b3a-0675-4cf0-a1dd-fe6d80cef630-operator-scripts\") pod \"glance-d65f-account-create-update-bpbpn\" (UID: \"f6351b3a-0675-4cf0-a1dd-fe6d80cef630\") " pod="openstack/glance-d65f-account-create-update-bpbpn" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.217617 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrwqn\" (UniqueName: \"kubernetes.io/projected/d0b7d48d-3568-4c7a-909a-210e079a3a1b-kube-api-access-wrwqn\") pod \"glance-db-create-2rfsr\" (UID: \"d0b7d48d-3568-4c7a-909a-210e079a3a1b\") " pod="openstack/glance-db-create-2rfsr" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.218172 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6351b3a-0675-4cf0-a1dd-fe6d80cef630-operator-scripts\") pod \"glance-d65f-account-create-update-bpbpn\" (UID: \"f6351b3a-0675-4cf0-a1dd-fe6d80cef630\") " pod="openstack/glance-d65f-account-create-update-bpbpn" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.318915 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-wrwqn\" (UniqueName: \"kubernetes.io/projected/d0b7d48d-3568-4c7a-909a-210e079a3a1b-kube-api-access-wrwqn\") pod \"glance-db-create-2rfsr\" (UID: \"d0b7d48d-3568-4c7a-909a-210e079a3a1b\") " pod="openstack/glance-db-create-2rfsr" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.318985 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0b7d48d-3568-4c7a-909a-210e079a3a1b-operator-scripts\") pod \"glance-db-create-2rfsr\" (UID: \"d0b7d48d-3568-4c7a-909a-210e079a3a1b\") " pod="openstack/glance-db-create-2rfsr" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.319017 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzmmv\" (UniqueName: \"kubernetes.io/projected/f6351b3a-0675-4cf0-a1dd-fe6d80cef630-kube-api-access-mzmmv\") pod \"glance-d65f-account-create-update-bpbpn\" (UID: \"f6351b3a-0675-4cf0-a1dd-fe6d80cef630\") " pod="openstack/glance-d65f-account-create-update-bpbpn" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.319705 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0b7d48d-3568-4c7a-909a-210e079a3a1b-operator-scripts\") pod \"glance-db-create-2rfsr\" (UID: \"d0b7d48d-3568-4c7a-909a-210e079a3a1b\") " pod="openstack/glance-db-create-2rfsr" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.337329 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzmmv\" (UniqueName: \"kubernetes.io/projected/f6351b3a-0675-4cf0-a1dd-fe6d80cef630-kube-api-access-mzmmv\") pod \"glance-d65f-account-create-update-bpbpn\" (UID: \"f6351b3a-0675-4cf0-a1dd-fe6d80cef630\") " pod="openstack/glance-d65f-account-create-update-bpbpn" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.337661 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrwqn\" (UniqueName: \"kubernetes.io/projected/d0b7d48d-3568-4c7a-909a-210e079a3a1b-kube-api-access-wrwqn\") pod \"glance-db-create-2rfsr\" (UID: \"d0b7d48d-3568-4c7a-909a-210e079a3a1b\") " pod="openstack/glance-db-create-2rfsr" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.424955 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-d65f-account-create-update-bpbpn" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.508629 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-2rfsr" Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.864306 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-d65f-account-create-update-bpbpn"] Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.952562 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-d65f-account-create-update-bpbpn" event={"ID":"f6351b3a-0675-4cf0-a1dd-fe6d80cef630","Type":"ContainerStarted","Data":"c5b44fcf2646d590c10e0cd7939bc45acbf4a8c1256fc179e9431639950e0ebf"} Dec 06 05:44:59 crc kubenswrapper[4706]: I1206 05:44:59.957165 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-2rfsr"] Dec 06 05:44:59 crc kubenswrapper[4706]: W1206 05:44:59.960404 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0b7d48d_3568_4c7a_909a_210e079a3a1b.slice/crio-22cb170ebc62541d5e5f9ede450ec4128d5976a3228d11737ee8d344a388765d WatchSource:0}: Error finding container 22cb170ebc62541d5e5f9ede450ec4128d5976a3228d11737ee8d344a388765d: Status 404 returned error can't find the container with id 22cb170ebc62541d5e5f9ede450ec4128d5976a3228d11737ee8d344a388765d Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.047148 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3a6858a-513f-4844-a1aa-0d9ad98d24fb" path="/var/lib/kubelet/pods/d3a6858a-513f-4844-a1aa-0d9ad98d24fb/volumes" Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.134086 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw"] Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.137706 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw" Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.140807 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.141006 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.143723 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw"] Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.234537 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8v5hp\" (UniqueName: \"kubernetes.io/projected/8f9e636a-ddae-4169-b6a0-f00f304bbeaa-kube-api-access-8v5hp\") pod \"collect-profiles-29416665-vplcw\" (UID: \"8f9e636a-ddae-4169-b6a0-f00f304bbeaa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw" Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.234620 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8f9e636a-ddae-4169-b6a0-f00f304bbeaa-secret-volume\") pod \"collect-profiles-29416665-vplcw\" (UID: \"8f9e636a-ddae-4169-b6a0-f00f304bbeaa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw" Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.234665 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8f9e636a-ddae-4169-b6a0-f00f304bbeaa-config-volume\") pod \"collect-profiles-29416665-vplcw\" (UID: \"8f9e636a-ddae-4169-b6a0-f00f304bbeaa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw" Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.336567 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8v5hp\" (UniqueName: \"kubernetes.io/projected/8f9e636a-ddae-4169-b6a0-f00f304bbeaa-kube-api-access-8v5hp\") pod \"collect-profiles-29416665-vplcw\" (UID: \"8f9e636a-ddae-4169-b6a0-f00f304bbeaa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw" Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.336637 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8f9e636a-ddae-4169-b6a0-f00f304bbeaa-secret-volume\") pod \"collect-profiles-29416665-vplcw\" (UID: \"8f9e636a-ddae-4169-b6a0-f00f304bbeaa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw" Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.336670 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8f9e636a-ddae-4169-b6a0-f00f304bbeaa-config-volume\") pod \"collect-profiles-29416665-vplcw\" (UID: \"8f9e636a-ddae-4169-b6a0-f00f304bbeaa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw" Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.337991 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8f9e636a-ddae-4169-b6a0-f00f304bbeaa-config-volume\") pod 
\"collect-profiles-29416665-vplcw\" (UID: \"8f9e636a-ddae-4169-b6a0-f00f304bbeaa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw" Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.345652 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8f9e636a-ddae-4169-b6a0-f00f304bbeaa-secret-volume\") pod \"collect-profiles-29416665-vplcw\" (UID: \"8f9e636a-ddae-4169-b6a0-f00f304bbeaa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw" Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.358415 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8v5hp\" (UniqueName: \"kubernetes.io/projected/8f9e636a-ddae-4169-b6a0-f00f304bbeaa-kube-api-access-8v5hp\") pod \"collect-profiles-29416665-vplcw\" (UID: \"8f9e636a-ddae-4169-b6a0-f00f304bbeaa\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw" Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.505562 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw" Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.800310 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.937333 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw"] Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.966491 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2rfsr" event={"ID":"d0b7d48d-3568-4c7a-909a-210e079a3a1b","Type":"ContainerStarted","Data":"22cb170ebc62541d5e5f9ede450ec4128d5976a3228d11737ee8d344a388765d"} Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.972243 4706 generic.go:334] "Generic (PLEG): container finished" podID="f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" containerID="3af9c99284043d95d1d0e700f9d3e8775e1b02554878dda547e21c5836505241" exitCode=0 Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.972327 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e","Type":"ContainerDied","Data":"3af9c99284043d95d1d0e700f9d3e8775e1b02554878dda547e21c5836505241"} Dec 06 05:45:00 crc kubenswrapper[4706]: I1206 05:45:00.976175 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw" event={"ID":"8f9e636a-ddae-4169-b6a0-f00f304bbeaa","Type":"ContainerStarted","Data":"0d093e6ca6ff24dd1f4ead558b20e7d708fd47f514ed0d7c2a0a82a15977a952"} Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.152209 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-g9s87"] Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.156551 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-g9s87" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.165283 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-g9s87"] Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.173097 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-245b-account-create-update-fr24l"] Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.174141 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-245b-account-create-update-fr24l" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.179694 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.188219 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-245b-account-create-update-fr24l"] Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.246254 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-2nftm"] Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.247235 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-2nftm" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.252421 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d9537ae-09b5-49d5-a0e5-6d8e6e992170-operator-scripts\") pod \"cinder-db-create-g9s87\" (UID: \"6d9537ae-09b5-49d5-a0e5-6d8e6e992170\") " pod="openstack/cinder-db-create-g9s87" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.252508 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jfjq\" (UniqueName: \"kubernetes.io/projected/6d9537ae-09b5-49d5-a0e5-6d8e6e992170-kube-api-access-2jfjq\") pod \"cinder-db-create-g9s87\" (UID: \"6d9537ae-09b5-49d5-a0e5-6d8e6e992170\") " pod="openstack/cinder-db-create-g9s87" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.252963 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-2nftm"] Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.329222 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-f39c-account-create-update-xpw6m"] Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.330333 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-f39c-account-create-update-xpw6m" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.334137 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.339572 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-f39c-account-create-update-xpw6m"] Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.354113 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65da32e7-79d5-4b2a-937c-8890711c77f4-operator-scripts\") pod \"cinder-245b-account-create-update-fr24l\" (UID: \"65da32e7-79d5-4b2a-937c-8890711c77f4\") " pod="openstack/cinder-245b-account-create-update-fr24l" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.354159 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jfjq\" (UniqueName: \"kubernetes.io/projected/6d9537ae-09b5-49d5-a0e5-6d8e6e992170-kube-api-access-2jfjq\") pod \"cinder-db-create-g9s87\" (UID: \"6d9537ae-09b5-49d5-a0e5-6d8e6e992170\") " pod="openstack/cinder-db-create-g9s87" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.354191 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f644703-222c-4d96-8473-8856ee25fb91-operator-scripts\") pod \"barbican-db-create-2nftm\" (UID: \"8f644703-222c-4d96-8473-8856ee25fb91\") " pod="openstack/barbican-db-create-2nftm" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.354335 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vj6m9\" (UniqueName: \"kubernetes.io/projected/8f644703-222c-4d96-8473-8856ee25fb91-kube-api-access-vj6m9\") pod \"barbican-db-create-2nftm\" (UID: \"8f644703-222c-4d96-8473-8856ee25fb91\") " pod="openstack/barbican-db-create-2nftm" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.354373 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsm7t\" (UniqueName: \"kubernetes.io/projected/65da32e7-79d5-4b2a-937c-8890711c77f4-kube-api-access-bsm7t\") pod \"cinder-245b-account-create-update-fr24l\" (UID: \"65da32e7-79d5-4b2a-937c-8890711c77f4\") " pod="openstack/cinder-245b-account-create-update-fr24l" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.354504 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d9537ae-09b5-49d5-a0e5-6d8e6e992170-operator-scripts\") pod \"cinder-db-create-g9s87\" (UID: \"6d9537ae-09b5-49d5-a0e5-6d8e6e992170\") " pod="openstack/cinder-db-create-g9s87" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.355382 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d9537ae-09b5-49d5-a0e5-6d8e6e992170-operator-scripts\") pod \"cinder-db-create-g9s87\" (UID: \"6d9537ae-09b5-49d5-a0e5-6d8e6e992170\") " pod="openstack/cinder-db-create-g9s87" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.383586 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jfjq\" (UniqueName: \"kubernetes.io/projected/6d9537ae-09b5-49d5-a0e5-6d8e6e992170-kube-api-access-2jfjq\") pod 
\"cinder-db-create-g9s87\" (UID: \"6d9537ae-09b5-49d5-a0e5-6d8e6e992170\") " pod="openstack/cinder-db-create-g9s87" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.432248 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-nwqqh"] Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.433608 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-nwqqh" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.441912 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-nwqqh"] Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.465824 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65da32e7-79d5-4b2a-937c-8890711c77f4-operator-scripts\") pod \"cinder-245b-account-create-update-fr24l\" (UID: \"65da32e7-79d5-4b2a-937c-8890711c77f4\") " pod="openstack/cinder-245b-account-create-update-fr24l" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.466596 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65da32e7-79d5-4b2a-937c-8890711c77f4-operator-scripts\") pod \"cinder-245b-account-create-update-fr24l\" (UID: \"65da32e7-79d5-4b2a-937c-8890711c77f4\") " pod="openstack/cinder-245b-account-create-update-fr24l" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.466714 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f644703-222c-4d96-8473-8856ee25fb91-operator-scripts\") pod \"barbican-db-create-2nftm\" (UID: \"8f644703-222c-4d96-8473-8856ee25fb91\") " pod="openstack/barbican-db-create-2nftm" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.467238 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f644703-222c-4d96-8473-8856ee25fb91-operator-scripts\") pod \"barbican-db-create-2nftm\" (UID: \"8f644703-222c-4d96-8473-8856ee25fb91\") " pod="openstack/barbican-db-create-2nftm" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.467285 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8b901a2-272b-4021-b8fc-f2e0051e68ce-operator-scripts\") pod \"barbican-f39c-account-create-update-xpw6m\" (UID: \"e8b901a2-272b-4021-b8fc-f2e0051e68ce\") " pod="openstack/barbican-f39c-account-create-update-xpw6m" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.467356 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vj6m9\" (UniqueName: \"kubernetes.io/projected/8f644703-222c-4d96-8473-8856ee25fb91-kube-api-access-vj6m9\") pod \"barbican-db-create-2nftm\" (UID: \"8f644703-222c-4d96-8473-8856ee25fb91\") " pod="openstack/barbican-db-create-2nftm" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.467380 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bsm7t\" (UniqueName: \"kubernetes.io/projected/65da32e7-79d5-4b2a-937c-8890711c77f4-kube-api-access-bsm7t\") pod \"cinder-245b-account-create-update-fr24l\" (UID: \"65da32e7-79d5-4b2a-937c-8890711c77f4\") " pod="openstack/cinder-245b-account-create-update-fr24l" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.467520 4706 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfw9p\" (UniqueName: \"kubernetes.io/projected/e8b901a2-272b-4021-b8fc-f2e0051e68ce-kube-api-access-wfw9p\") pod \"barbican-f39c-account-create-update-xpw6m\" (UID: \"e8b901a2-272b-4021-b8fc-f2e0051e68ce\") " pod="openstack/barbican-f39c-account-create-update-xpw6m" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.484950 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vj6m9\" (UniqueName: \"kubernetes.io/projected/8f644703-222c-4d96-8473-8856ee25fb91-kube-api-access-vj6m9\") pod \"barbican-db-create-2nftm\" (UID: \"8f644703-222c-4d96-8473-8856ee25fb91\") " pod="openstack/barbican-db-create-2nftm" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.485623 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bsm7t\" (UniqueName: \"kubernetes.io/projected/65da32e7-79d5-4b2a-937c-8890711c77f4-kube-api-access-bsm7t\") pod \"cinder-245b-account-create-update-fr24l\" (UID: \"65da32e7-79d5-4b2a-937c-8890711c77f4\") " pod="openstack/cinder-245b-account-create-update-fr24l" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.505282 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-g9s87" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.539611 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-3a0e-account-create-update-ctxbc"] Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.540645 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-3a0e-account-create-update-ctxbc" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.543027 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.545001 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-245b-account-create-update-fr24l" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.552620 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-3a0e-account-create-update-ctxbc"] Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.569064 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c22tz\" (UniqueName: \"kubernetes.io/projected/4f408368-233a-4ada-86e7-d4125b2a1bb2-kube-api-access-c22tz\") pod \"neutron-db-create-nwqqh\" (UID: \"4f408368-233a-4ada-86e7-d4125b2a1bb2\") " pod="openstack/neutron-db-create-nwqqh" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.569117 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfw9p\" (UniqueName: \"kubernetes.io/projected/e8b901a2-272b-4021-b8fc-f2e0051e68ce-kube-api-access-wfw9p\") pod \"barbican-f39c-account-create-update-xpw6m\" (UID: \"e8b901a2-272b-4021-b8fc-f2e0051e68ce\") " pod="openstack/barbican-f39c-account-create-update-xpw6m" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.569180 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f408368-233a-4ada-86e7-d4125b2a1bb2-operator-scripts\") pod \"neutron-db-create-nwqqh\" (UID: \"4f408368-233a-4ada-86e7-d4125b2a1bb2\") " pod="openstack/neutron-db-create-nwqqh" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.569257 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8b901a2-272b-4021-b8fc-f2e0051e68ce-operator-scripts\") pod \"barbican-f39c-account-create-update-xpw6m\" (UID: \"e8b901a2-272b-4021-b8fc-f2e0051e68ce\") " pod="openstack/barbican-f39c-account-create-update-xpw6m" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.570130 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8b901a2-272b-4021-b8fc-f2e0051e68ce-operator-scripts\") pod \"barbican-f39c-account-create-update-xpw6m\" (UID: \"e8b901a2-272b-4021-b8fc-f2e0051e68ce\") " pod="openstack/barbican-f39c-account-create-update-xpw6m" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.589207 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-2nftm" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.591345 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfw9p\" (UniqueName: \"kubernetes.io/projected/e8b901a2-272b-4021-b8fc-f2e0051e68ce-kube-api-access-wfw9p\") pod \"barbican-f39c-account-create-update-xpw6m\" (UID: \"e8b901a2-272b-4021-b8fc-f2e0051e68ce\") " pod="openstack/barbican-f39c-account-create-update-xpw6m" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.670467 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z87fb\" (UniqueName: \"kubernetes.io/projected/ed44107e-4d92-4895-961b-4cce9234319c-kube-api-access-z87fb\") pod \"neutron-3a0e-account-create-update-ctxbc\" (UID: \"ed44107e-4d92-4895-961b-4cce9234319c\") " pod="openstack/neutron-3a0e-account-create-update-ctxbc" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.670849 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c22tz\" (UniqueName: \"kubernetes.io/projected/4f408368-233a-4ada-86e7-d4125b2a1bb2-kube-api-access-c22tz\") pod \"neutron-db-create-nwqqh\" (UID: \"4f408368-233a-4ada-86e7-d4125b2a1bb2\") " pod="openstack/neutron-db-create-nwqqh" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.670889 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed44107e-4d92-4895-961b-4cce9234319c-operator-scripts\") pod \"neutron-3a0e-account-create-update-ctxbc\" (UID: \"ed44107e-4d92-4895-961b-4cce9234319c\") " pod="openstack/neutron-3a0e-account-create-update-ctxbc" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.670908 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f408368-233a-4ada-86e7-d4125b2a1bb2-operator-scripts\") pod \"neutron-db-create-nwqqh\" (UID: \"4f408368-233a-4ada-86e7-d4125b2a1bb2\") " pod="openstack/neutron-db-create-nwqqh" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.671858 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f408368-233a-4ada-86e7-d4125b2a1bb2-operator-scripts\") pod \"neutron-db-create-nwqqh\" (UID: \"4f408368-233a-4ada-86e7-d4125b2a1bb2\") " pod="openstack/neutron-db-create-nwqqh" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.694985 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c22tz\" (UniqueName: \"kubernetes.io/projected/4f408368-233a-4ada-86e7-d4125b2a1bb2-kube-api-access-c22tz\") pod \"neutron-db-create-nwqqh\" (UID: \"4f408368-233a-4ada-86e7-d4125b2a1bb2\") " pod="openstack/neutron-db-create-nwqqh" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.733463 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-f39c-account-create-update-xpw6m" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.749277 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-nwqqh" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.772313 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z87fb\" (UniqueName: \"kubernetes.io/projected/ed44107e-4d92-4895-961b-4cce9234319c-kube-api-access-z87fb\") pod \"neutron-3a0e-account-create-update-ctxbc\" (UID: \"ed44107e-4d92-4895-961b-4cce9234319c\") " pod="openstack/neutron-3a0e-account-create-update-ctxbc" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.772412 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed44107e-4d92-4895-961b-4cce9234319c-operator-scripts\") pod \"neutron-3a0e-account-create-update-ctxbc\" (UID: \"ed44107e-4d92-4895-961b-4cce9234319c\") " pod="openstack/neutron-3a0e-account-create-update-ctxbc" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.773140 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed44107e-4d92-4895-961b-4cce9234319c-operator-scripts\") pod \"neutron-3a0e-account-create-update-ctxbc\" (UID: \"ed44107e-4d92-4895-961b-4cce9234319c\") " pod="openstack/neutron-3a0e-account-create-update-ctxbc" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.791802 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z87fb\" (UniqueName: \"kubernetes.io/projected/ed44107e-4d92-4895-961b-4cce9234319c-kube-api-access-z87fb\") pod \"neutron-3a0e-account-create-update-ctxbc\" (UID: \"ed44107e-4d92-4895-961b-4cce9234319c\") " pod="openstack/neutron-3a0e-account-create-update-ctxbc" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.987034 4706 generic.go:334] "Generic (PLEG): container finished" podID="8f9e636a-ddae-4169-b6a0-f00f304bbeaa" containerID="a465b7a27d5d5e76fdba5a76ce099b0fddfa69b0d9895eed51f0c98288086d5d" exitCode=0 Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.987489 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw" event={"ID":"8f9e636a-ddae-4169-b6a0-f00f304bbeaa","Type":"ContainerDied","Data":"a465b7a27d5d5e76fdba5a76ce099b0fddfa69b0d9895eed51f0c98288086d5d"} Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.990059 4706 generic.go:334] "Generic (PLEG): container finished" podID="f6351b3a-0675-4cf0-a1dd-fe6d80cef630" containerID="7372b11b0d793e0c4ed3badda996c3d9e958a6bbd1443ee7f493a37a14ef57d6" exitCode=0 Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.990121 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-d65f-account-create-update-bpbpn" event={"ID":"f6351b3a-0675-4cf0-a1dd-fe6d80cef630","Type":"ContainerDied","Data":"7372b11b0d793e0c4ed3badda996c3d9e958a6bbd1443ee7f493a37a14ef57d6"} Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.996755 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-3a0e-account-create-update-ctxbc" Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.999420 4706 generic.go:334] "Generic (PLEG): container finished" podID="d0b7d48d-3568-4c7a-909a-210e079a3a1b" containerID="824f2d7eacdae14d064a6787d0e8a685686da802e900559bf915c1c4f03c0cad" exitCode=0 Dec 06 05:45:01 crc kubenswrapper[4706]: I1206 05:45:01.999495 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2rfsr" event={"ID":"d0b7d48d-3568-4c7a-909a-210e079a3a1b","Type":"ContainerDied","Data":"824f2d7eacdae14d064a6787d0e8a685686da802e900559bf915c1c4f03c0cad"} Dec 06 05:45:02 crc kubenswrapper[4706]: I1206 05:45:02.014375 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e","Type":"ContainerStarted","Data":"03b7e3b2fa72f543f655f0cf67b89d244c143d11bda2a8d13e353c58416bffa8"} Dec 06 05:45:02 crc kubenswrapper[4706]: I1206 05:45:02.015542 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:45:02 crc kubenswrapper[4706]: I1206 05:45:02.143443 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=-9223371953.71135 podStartE2EDuration="1m23.143426969s" podCreationTimestamp="2025-12-06 05:43:39 +0000 UTC" firstStartedPulling="2025-12-06 05:43:55.755779212 +0000 UTC m=+1458.083603156" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:45:02.13941516 +0000 UTC m=+1524.467239124" watchObservedRunningTime="2025-12-06 05:45:02.143426969 +0000 UTC m=+1524.471250913" Dec 06 05:45:02 crc kubenswrapper[4706]: I1206 05:45:02.177496 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-g9s87"] Dec 06 05:45:02 crc kubenswrapper[4706]: I1206 05:45:02.203302 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-2nftm"] Dec 06 05:45:02 crc kubenswrapper[4706]: I1206 05:45:02.240361 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-245b-account-create-update-fr24l"] Dec 06 05:45:02 crc kubenswrapper[4706]: W1206 05:45:02.256638 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod65da32e7_79d5_4b2a_937c_8890711c77f4.slice/crio-e5cb9a59c4f095154ff48e0bf5c2f402f8bd5786fc47ab47bd4dcb948a30faa0 WatchSource:0}: Error finding container e5cb9a59c4f095154ff48e0bf5c2f402f8bd5786fc47ab47bd4dcb948a30faa0: Status 404 returned error can't find the container with id e5cb9a59c4f095154ff48e0bf5c2f402f8bd5786fc47ab47bd4dcb948a30faa0 Dec 06 05:45:02 crc kubenswrapper[4706]: I1206 05:45:02.445735 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-f39c-account-create-update-xpw6m"] Dec 06 05:45:02 crc kubenswrapper[4706]: W1206 05:45:02.451530 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode8b901a2_272b_4021_b8fc_f2e0051e68ce.slice/crio-0f973009b6201e0626131e0ccc58fd434007214af151d3943d138351d3d1366f WatchSource:0}: Error finding container 0f973009b6201e0626131e0ccc58fd434007214af151d3943d138351d3d1366f: Status 404 returned error can't find the container with id 0f973009b6201e0626131e0ccc58fd434007214af151d3943d138351d3d1366f Dec 06 05:45:02 crc kubenswrapper[4706]: I1206 05:45:02.463230 4706 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-nwqqh"] Dec 06 05:45:02 crc kubenswrapper[4706]: I1206 05:45:02.611118 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-3a0e-account-create-update-ctxbc"] Dec 06 05:45:02 crc kubenswrapper[4706]: W1206 05:45:02.620451 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poded44107e_4d92_4895_961b_4cce9234319c.slice/crio-61c210f4eb72d1d08d0a762ad32e3fd55af736a4eff30dc45bf36d3ac948703e WatchSource:0}: Error finding container 61c210f4eb72d1d08d0a762ad32e3fd55af736a4eff30dc45bf36d3ac948703e: Status 404 returned error can't find the container with id 61c210f4eb72d1d08d0a762ad32e3fd55af736a4eff30dc45bf36d3ac948703e Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.021930 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-3a0e-account-create-update-ctxbc" event={"ID":"ed44107e-4d92-4895-961b-4cce9234319c","Type":"ContainerStarted","Data":"1b84902badaf8dc9269c5bff4c84f4bf4adfbc7a769092875c7c660e3d3f202c"} Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.022256 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-3a0e-account-create-update-ctxbc" event={"ID":"ed44107e-4d92-4895-961b-4cce9234319c","Type":"ContainerStarted","Data":"61c210f4eb72d1d08d0a762ad32e3fd55af736a4eff30dc45bf36d3ac948703e"} Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.024370 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-245b-account-create-update-fr24l" event={"ID":"65da32e7-79d5-4b2a-937c-8890711c77f4","Type":"ContainerStarted","Data":"f04742d6b45408f5cdc0a29d85709adfd76649cd14446123eabeacffc7dbcfb6"} Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.024397 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-245b-account-create-update-fr24l" event={"ID":"65da32e7-79d5-4b2a-937c-8890711c77f4","Type":"ContainerStarted","Data":"e5cb9a59c4f095154ff48e0bf5c2f402f8bd5786fc47ab47bd4dcb948a30faa0"} Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.026032 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f39c-account-create-update-xpw6m" event={"ID":"e8b901a2-272b-4021-b8fc-f2e0051e68ce","Type":"ContainerStarted","Data":"757c5f7569f581dcd7aa8beeecddc19ffc2346ec6e6957fdc9b95860d1141a7a"} Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.026068 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f39c-account-create-update-xpw6m" event={"ID":"e8b901a2-272b-4021-b8fc-f2e0051e68ce","Type":"ContainerStarted","Data":"0f973009b6201e0626131e0ccc58fd434007214af151d3943d138351d3d1366f"} Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.028240 4706 generic.go:334] "Generic (PLEG): container finished" podID="8f644703-222c-4d96-8473-8856ee25fb91" containerID="bdc8b764068139ac803c9b9f0f609fb64cf6246f8b3bdab9309e44ff0058e233" exitCode=0 Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.028359 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-2nftm" event={"ID":"8f644703-222c-4d96-8473-8856ee25fb91","Type":"ContainerDied","Data":"bdc8b764068139ac803c9b9f0f609fb64cf6246f8b3bdab9309e44ff0058e233"} Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.028393 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-2nftm" 
event={"ID":"8f644703-222c-4d96-8473-8856ee25fb91","Type":"ContainerStarted","Data":"71ca17a51d52450a64114ff49713dab220b03677ef6a3111ae35deb0e9c091c3"} Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.030143 4706 generic.go:334] "Generic (PLEG): container finished" podID="6d9537ae-09b5-49d5-a0e5-6d8e6e992170" containerID="e330223da4a0f81d4de8b3096784be3e084112807ae22f2d3bff09d68c40af7f" exitCode=0 Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.030209 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-g9s87" event={"ID":"6d9537ae-09b5-49d5-a0e5-6d8e6e992170","Type":"ContainerDied","Data":"e330223da4a0f81d4de8b3096784be3e084112807ae22f2d3bff09d68c40af7f"} Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.030237 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-g9s87" event={"ID":"6d9537ae-09b5-49d5-a0e5-6d8e6e992170","Type":"ContainerStarted","Data":"a3ad58e773e78026f4ce774fa03701e0ad660cb56fd79e170804eedd9cf8cc9e"} Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.031669 4706 generic.go:334] "Generic (PLEG): container finished" podID="4f408368-233a-4ada-86e7-d4125b2a1bb2" containerID="aaa55450163d9bee941bd72c27cd9273c2c582af776937dab02384a3670e1ab0" exitCode=0 Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.031744 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-nwqqh" event={"ID":"4f408368-233a-4ada-86e7-d4125b2a1bb2","Type":"ContainerDied","Data":"aaa55450163d9bee941bd72c27cd9273c2c582af776937dab02384a3670e1ab0"} Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.031769 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-nwqqh" event={"ID":"4f408368-233a-4ada-86e7-d4125b2a1bb2","Type":"ContainerStarted","Data":"92eca72a6269d427410a9e521cde2154a1f2932040c811ff740c29ae06d0104b"} Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.048856 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-3a0e-account-create-update-ctxbc" podStartSLOduration=2.048835613 podStartE2EDuration="2.048835613s" podCreationTimestamp="2025-12-06 05:45:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:45:03.046234492 +0000 UTC m=+1525.374058456" watchObservedRunningTime="2025-12-06 05:45:03.048835613 +0000 UTC m=+1525.376659557" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.073587 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-245b-account-create-update-fr24l" podStartSLOduration=2.073567602 podStartE2EDuration="2.073567602s" podCreationTimestamp="2025-12-06 05:45:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:45:03.072383869 +0000 UTC m=+1525.400207813" watchObservedRunningTime="2025-12-06 05:45:03.073567602 +0000 UTC m=+1525.401391546" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.143474 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-f39c-account-create-update-xpw6m" podStartSLOduration=2.143456323 podStartE2EDuration="2.143456323s" podCreationTimestamp="2025-12-06 05:45:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:45:03.138989401 +0000 UTC 
m=+1525.466813345" watchObservedRunningTime="2025-12-06 05:45:03.143456323 +0000 UTC m=+1525.471280267" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.469269 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-tnbqb"] Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.471547 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-tnbqb" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.490520 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-tnbqb"] Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.514272 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlvct\" (UniqueName: \"kubernetes.io/projected/6ebc347a-c311-41c2-bed4-0fcd22e26342-kube-api-access-xlvct\") pod \"keystone-db-create-tnbqb\" (UID: \"6ebc347a-c311-41c2-bed4-0fcd22e26342\") " pod="openstack/keystone-db-create-tnbqb" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.514440 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6ebc347a-c311-41c2-bed4-0fcd22e26342-operator-scripts\") pod \"keystone-db-create-tnbqb\" (UID: \"6ebc347a-c311-41c2-bed4-0fcd22e26342\") " pod="openstack/keystone-db-create-tnbqb" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.558659 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-ab7c-account-create-update-knbmr"] Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.559784 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-ab7c-account-create-update-knbmr" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.561455 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.562573 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-d65f-account-create-update-bpbpn" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.587092 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2rfsr" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.587844 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-ab7c-account-create-update-knbmr"] Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.600213 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.621335 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0b7d48d-3568-4c7a-909a-210e079a3a1b-operator-scripts\") pod \"d0b7d48d-3568-4c7a-909a-210e079a3a1b\" (UID: \"d0b7d48d-3568-4c7a-909a-210e079a3a1b\") " Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.621396 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrwqn\" (UniqueName: \"kubernetes.io/projected/d0b7d48d-3568-4c7a-909a-210e079a3a1b-kube-api-access-wrwqn\") pod \"d0b7d48d-3568-4c7a-909a-210e079a3a1b\" (UID: \"d0b7d48d-3568-4c7a-909a-210e079a3a1b\") " Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.621473 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6351b3a-0675-4cf0-a1dd-fe6d80cef630-operator-scripts\") pod \"f6351b3a-0675-4cf0-a1dd-fe6d80cef630\" (UID: \"f6351b3a-0675-4cf0-a1dd-fe6d80cef630\") " Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.621539 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mzmmv\" (UniqueName: \"kubernetes.io/projected/f6351b3a-0675-4cf0-a1dd-fe6d80cef630-kube-api-access-mzmmv\") pod \"f6351b3a-0675-4cf0-a1dd-fe6d80cef630\" (UID: \"f6351b3a-0675-4cf0-a1dd-fe6d80cef630\") " Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.623027 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0b7d48d-3568-4c7a-909a-210e079a3a1b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d0b7d48d-3568-4c7a-909a-210e079a3a1b" (UID: "d0b7d48d-3568-4c7a-909a-210e079a3a1b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.623397 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f6351b3a-0675-4cf0-a1dd-fe6d80cef630-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f6351b3a-0675-4cf0-a1dd-fe6d80cef630" (UID: "f6351b3a-0675-4cf0-a1dd-fe6d80cef630"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.632462 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6351b3a-0675-4cf0-a1dd-fe6d80cef630-kube-api-access-mzmmv" (OuterVolumeSpecName: "kube-api-access-mzmmv") pod "f6351b3a-0675-4cf0-a1dd-fe6d80cef630" (UID: "f6351b3a-0675-4cf0-a1dd-fe6d80cef630"). InnerVolumeSpecName "kube-api-access-mzmmv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.635624 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6ebc347a-c311-41c2-bed4-0fcd22e26342-operator-scripts\") pod \"keystone-db-create-tnbqb\" (UID: \"6ebc347a-c311-41c2-bed4-0fcd22e26342\") " pod="openstack/keystone-db-create-tnbqb" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.635717 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1854690-b0e0-4052-95a6-6951260cdb0b-operator-scripts\") pod \"keystone-ab7c-account-create-update-knbmr\" (UID: \"c1854690-b0e0-4052-95a6-6951260cdb0b\") " pod="openstack/keystone-ab7c-account-create-update-knbmr" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.635798 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlvct\" (UniqueName: \"kubernetes.io/projected/6ebc347a-c311-41c2-bed4-0fcd22e26342-kube-api-access-xlvct\") pod \"keystone-db-create-tnbqb\" (UID: \"6ebc347a-c311-41c2-bed4-0fcd22e26342\") " pod="openstack/keystone-db-create-tnbqb" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.635841 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88xm4\" (UniqueName: \"kubernetes.io/projected/c1854690-b0e0-4052-95a6-6951260cdb0b-kube-api-access-88xm4\") pod \"keystone-ab7c-account-create-update-knbmr\" (UID: \"c1854690-b0e0-4052-95a6-6951260cdb0b\") " pod="openstack/keystone-ab7c-account-create-update-knbmr" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.636062 4706 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d0b7d48d-3568-4c7a-909a-210e079a3a1b-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.636080 4706 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6351b3a-0675-4cf0-a1dd-fe6d80cef630-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.636091 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mzmmv\" (UniqueName: \"kubernetes.io/projected/f6351b3a-0675-4cf0-a1dd-fe6d80cef630-kube-api-access-mzmmv\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.636905 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6ebc347a-c311-41c2-bed4-0fcd22e26342-operator-scripts\") pod \"keystone-db-create-tnbqb\" (UID: \"6ebc347a-c311-41c2-bed4-0fcd22e26342\") " pod="openstack/keystone-db-create-tnbqb" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.639528 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0b7d48d-3568-4c7a-909a-210e079a3a1b-kube-api-access-wrwqn" (OuterVolumeSpecName: "kube-api-access-wrwqn") pod "d0b7d48d-3568-4c7a-909a-210e079a3a1b" (UID: "d0b7d48d-3568-4c7a-909a-210e079a3a1b"). InnerVolumeSpecName "kube-api-access-wrwqn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.664795 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlvct\" (UniqueName: \"kubernetes.io/projected/6ebc347a-c311-41c2-bed4-0fcd22e26342-kube-api-access-xlvct\") pod \"keystone-db-create-tnbqb\" (UID: \"6ebc347a-c311-41c2-bed4-0fcd22e26342\") " pod="openstack/keystone-db-create-tnbqb" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.737028 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8v5hp\" (UniqueName: \"kubernetes.io/projected/8f9e636a-ddae-4169-b6a0-f00f304bbeaa-kube-api-access-8v5hp\") pod \"8f9e636a-ddae-4169-b6a0-f00f304bbeaa\" (UID: \"8f9e636a-ddae-4169-b6a0-f00f304bbeaa\") " Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.737107 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8f9e636a-ddae-4169-b6a0-f00f304bbeaa-config-volume\") pod \"8f9e636a-ddae-4169-b6a0-f00f304bbeaa\" (UID: \"8f9e636a-ddae-4169-b6a0-f00f304bbeaa\") " Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.738082 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f9e636a-ddae-4169-b6a0-f00f304bbeaa-config-volume" (OuterVolumeSpecName: "config-volume") pod "8f9e636a-ddae-4169-b6a0-f00f304bbeaa" (UID: "8f9e636a-ddae-4169-b6a0-f00f304bbeaa"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.738135 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8f9e636a-ddae-4169-b6a0-f00f304bbeaa-secret-volume\") pod \"8f9e636a-ddae-4169-b6a0-f00f304bbeaa\" (UID: \"8f9e636a-ddae-4169-b6a0-f00f304bbeaa\") " Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.738491 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88xm4\" (UniqueName: \"kubernetes.io/projected/c1854690-b0e0-4052-95a6-6951260cdb0b-kube-api-access-88xm4\") pod \"keystone-ab7c-account-create-update-knbmr\" (UID: \"c1854690-b0e0-4052-95a6-6951260cdb0b\") " pod="openstack/keystone-ab7c-account-create-update-knbmr" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.738744 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1854690-b0e0-4052-95a6-6951260cdb0b-operator-scripts\") pod \"keystone-ab7c-account-create-update-knbmr\" (UID: \"c1854690-b0e0-4052-95a6-6951260cdb0b\") " pod="openstack/keystone-ab7c-account-create-update-knbmr" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.739951 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1854690-b0e0-4052-95a6-6951260cdb0b-operator-scripts\") pod \"keystone-ab7c-account-create-update-knbmr\" (UID: \"c1854690-b0e0-4052-95a6-6951260cdb0b\") " pod="openstack/keystone-ab7c-account-create-update-knbmr" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.740186 4706 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8f9e636a-ddae-4169-b6a0-f00f304bbeaa-config-volume\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.740212 4706 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrwqn\" (UniqueName: \"kubernetes.io/projected/d0b7d48d-3568-4c7a-909a-210e079a3a1b-kube-api-access-wrwqn\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.740252 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f9e636a-ddae-4169-b6a0-f00f304bbeaa-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8f9e636a-ddae-4169-b6a0-f00f304bbeaa" (UID: "8f9e636a-ddae-4169-b6a0-f00f304bbeaa"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.741245 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f9e636a-ddae-4169-b6a0-f00f304bbeaa-kube-api-access-8v5hp" (OuterVolumeSpecName: "kube-api-access-8v5hp") pod "8f9e636a-ddae-4169-b6a0-f00f304bbeaa" (UID: "8f9e636a-ddae-4169-b6a0-f00f304bbeaa"). InnerVolumeSpecName "kube-api-access-8v5hp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.753669 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-88xm4\" (UniqueName: \"kubernetes.io/projected/c1854690-b0e0-4052-95a6-6951260cdb0b-kube-api-access-88xm4\") pod \"keystone-ab7c-account-create-update-knbmr\" (UID: \"c1854690-b0e0-4052-95a6-6951260cdb0b\") " pod="openstack/keystone-ab7c-account-create-update-knbmr" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.808119 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-ks7jq"] Dec 06 05:45:03 crc kubenswrapper[4706]: E1206 05:45:03.808808 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f6351b3a-0675-4cf0-a1dd-fe6d80cef630" containerName="mariadb-account-create-update" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.808849 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="f6351b3a-0675-4cf0-a1dd-fe6d80cef630" containerName="mariadb-account-create-update" Dec 06 05:45:03 crc kubenswrapper[4706]: E1206 05:45:03.808870 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f9e636a-ddae-4169-b6a0-f00f304bbeaa" containerName="collect-profiles" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.808887 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f9e636a-ddae-4169-b6a0-f00f304bbeaa" containerName="collect-profiles" Dec 06 05:45:03 crc kubenswrapper[4706]: E1206 05:45:03.808950 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0b7d48d-3568-4c7a-909a-210e079a3a1b" containerName="mariadb-database-create" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.808969 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0b7d48d-3568-4c7a-909a-210e079a3a1b" containerName="mariadb-database-create" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.809387 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0b7d48d-3568-4c7a-909a-210e079a3a1b" containerName="mariadb-database-create" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.809508 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f9e636a-ddae-4169-b6a0-f00f304bbeaa" containerName="collect-profiles" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.809588 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="f6351b3a-0675-4cf0-a1dd-fe6d80cef630" 
containerName="mariadb-account-create-update" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.810785 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-ks7jq" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.815692 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-ks7jq"] Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.841654 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8v5hp\" (UniqueName: \"kubernetes.io/projected/8f9e636a-ddae-4169-b6a0-f00f304bbeaa-kube-api-access-8v5hp\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.841708 4706 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8f9e636a-ddae-4169-b6a0-f00f304bbeaa-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.883560 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-tnbqb" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.915124 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-ab7c-account-create-update-knbmr" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.919965 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-bd4e-account-create-update-g6ws8"] Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.921065 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-bd4e-account-create-update-g6ws8" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.923743 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.929268 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-bd4e-account-create-update-g6ws8"] Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.943617 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/336ef851-faf5-4c3d-ad1f-316af4dedd9a-operator-scripts\") pod \"placement-db-create-ks7jq\" (UID: \"336ef851-faf5-4c3d-ad1f-316af4dedd9a\") " pod="openstack/placement-db-create-ks7jq" Dec 06 05:45:03 crc kubenswrapper[4706]: I1206 05:45:03.943857 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7xxn6\" (UniqueName: \"kubernetes.io/projected/336ef851-faf5-4c3d-ad1f-316af4dedd9a-kube-api-access-7xxn6\") pod \"placement-db-create-ks7jq\" (UID: \"336ef851-faf5-4c3d-ad1f-316af4dedd9a\") " pod="openstack/placement-db-create-ks7jq" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.045023 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2q77t\" (UniqueName: \"kubernetes.io/projected/5e0f3a5c-0b70-43c6-9b1e-5729f502488e-kube-api-access-2q77t\") pod \"placement-bd4e-account-create-update-g6ws8\" (UID: \"5e0f3a5c-0b70-43c6-9b1e-5729f502488e\") " pod="openstack/placement-bd4e-account-create-update-g6ws8" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.045098 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/336ef851-faf5-4c3d-ad1f-316af4dedd9a-operator-scripts\") pod \"placement-db-create-ks7jq\" (UID: \"336ef851-faf5-4c3d-ad1f-316af4dedd9a\") " pod="openstack/placement-db-create-ks7jq" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.045223 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e0f3a5c-0b70-43c6-9b1e-5729f502488e-operator-scripts\") pod \"placement-bd4e-account-create-update-g6ws8\" (UID: \"5e0f3a5c-0b70-43c6-9b1e-5729f502488e\") " pod="openstack/placement-bd4e-account-create-update-g6ws8" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.045270 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7xxn6\" (UniqueName: \"kubernetes.io/projected/336ef851-faf5-4c3d-ad1f-316af4dedd9a-kube-api-access-7xxn6\") pod \"placement-db-create-ks7jq\" (UID: \"336ef851-faf5-4c3d-ad1f-316af4dedd9a\") " pod="openstack/placement-db-create-ks7jq" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.046242 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/336ef851-faf5-4c3d-ad1f-316af4dedd9a-operator-scripts\") pod \"placement-db-create-ks7jq\" (UID: \"336ef851-faf5-4c3d-ad1f-316af4dedd9a\") " pod="openstack/placement-db-create-ks7jq" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.068869 4706 generic.go:334] "Generic (PLEG): container finished" podID="ed44107e-4d92-4895-961b-4cce9234319c" containerID="1b84902badaf8dc9269c5bff4c84f4bf4adfbc7a769092875c7c660e3d3f202c" exitCode=0 Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.068950 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-3a0e-account-create-update-ctxbc" event={"ID":"ed44107e-4d92-4895-961b-4cce9234319c","Type":"ContainerDied","Data":"1b84902badaf8dc9269c5bff4c84f4bf4adfbc7a769092875c7c660e3d3f202c"} Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.072641 4706 generic.go:334] "Generic (PLEG): container finished" podID="65da32e7-79d5-4b2a-937c-8890711c77f4" containerID="f04742d6b45408f5cdc0a29d85709adfd76649cd14446123eabeacffc7dbcfb6" exitCode=0 Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.072744 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-245b-account-create-update-fr24l" event={"ID":"65da32e7-79d5-4b2a-937c-8890711c77f4","Type":"ContainerDied","Data":"f04742d6b45408f5cdc0a29d85709adfd76649cd14446123eabeacffc7dbcfb6"} Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.074324 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw" event={"ID":"8f9e636a-ddae-4169-b6a0-f00f304bbeaa","Type":"ContainerDied","Data":"0d093e6ca6ff24dd1f4ead558b20e7d708fd47f514ed0d7c2a0a82a15977a952"} Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.074350 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0d093e6ca6ff24dd1f4ead558b20e7d708fd47f514ed0d7c2a0a82a15977a952" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.074333 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.074820 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7xxn6\" (UniqueName: \"kubernetes.io/projected/336ef851-faf5-4c3d-ad1f-316af4dedd9a-kube-api-access-7xxn6\") pod \"placement-db-create-ks7jq\" (UID: \"336ef851-faf5-4c3d-ad1f-316af4dedd9a\") " pod="openstack/placement-db-create-ks7jq" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.075763 4706 generic.go:334] "Generic (PLEG): container finished" podID="e8b901a2-272b-4021-b8fc-f2e0051e68ce" containerID="757c5f7569f581dcd7aa8beeecddc19ffc2346ec6e6957fdc9b95860d1141a7a" exitCode=0 Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.075874 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f39c-account-create-update-xpw6m" event={"ID":"e8b901a2-272b-4021-b8fc-f2e0051e68ce","Type":"ContainerDied","Data":"757c5f7569f581dcd7aa8beeecddc19ffc2346ec6e6957fdc9b95860d1141a7a"} Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.084675 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2rfsr" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.085362 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2rfsr" event={"ID":"d0b7d48d-3568-4c7a-909a-210e079a3a1b","Type":"ContainerDied","Data":"22cb170ebc62541d5e5f9ede450ec4128d5976a3228d11737ee8d344a388765d"} Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.085400 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="22cb170ebc62541d5e5f9ede450ec4128d5976a3228d11737ee8d344a388765d" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.087196 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-d65f-account-create-update-bpbpn" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.087641 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-d65f-account-create-update-bpbpn" event={"ID":"f6351b3a-0675-4cf0-a1dd-fe6d80cef630","Type":"ContainerDied","Data":"c5b44fcf2646d590c10e0cd7939bc45acbf4a8c1256fc179e9431639950e0ebf"} Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.087669 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c5b44fcf2646d590c10e0cd7939bc45acbf4a8c1256fc179e9431639950e0ebf" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.138461 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-ks7jq" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.146624 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2q77t\" (UniqueName: \"kubernetes.io/projected/5e0f3a5c-0b70-43c6-9b1e-5729f502488e-kube-api-access-2q77t\") pod \"placement-bd4e-account-create-update-g6ws8\" (UID: \"5e0f3a5c-0b70-43c6-9b1e-5729f502488e\") " pod="openstack/placement-bd4e-account-create-update-g6ws8" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.146743 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e0f3a5c-0b70-43c6-9b1e-5729f502488e-operator-scripts\") pod \"placement-bd4e-account-create-update-g6ws8\" (UID: \"5e0f3a5c-0b70-43c6-9b1e-5729f502488e\") " pod="openstack/placement-bd4e-account-create-update-g6ws8" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.147384 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e0f3a5c-0b70-43c6-9b1e-5729f502488e-operator-scripts\") pod \"placement-bd4e-account-create-update-g6ws8\" (UID: \"5e0f3a5c-0b70-43c6-9b1e-5729f502488e\") " pod="openstack/placement-bd4e-account-create-update-g6ws8" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.172974 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2q77t\" (UniqueName: \"kubernetes.io/projected/5e0f3a5c-0b70-43c6-9b1e-5729f502488e-kube-api-access-2q77t\") pod \"placement-bd4e-account-create-update-g6ws8\" (UID: \"5e0f3a5c-0b70-43c6-9b1e-5729f502488e\") " pod="openstack/placement-bd4e-account-create-update-g6ws8" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.243333 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-bd4e-account-create-update-g6ws8" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.400090 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-tnbqb"] Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.701837 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-ab7c-account-create-update-knbmr"] Dec 06 05:45:04 crc kubenswrapper[4706]: W1206 05:45:04.714370 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc1854690_b0e0_4052_95a6_6951260cdb0b.slice/crio-1894056fe4e023f6a1a99ddf982e82cdd4522a5624bd1d26398daf2bca94eaa7 WatchSource:0}: Error finding container 1894056fe4e023f6a1a99ddf982e82cdd4522a5624bd1d26398daf2bca94eaa7: Status 404 returned error can't find the container with id 1894056fe4e023f6a1a99ddf982e82cdd4522a5624bd1d26398daf2bca94eaa7 Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.724848 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-2nftm" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.731427 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-g9s87" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.735822 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-nwqqh" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.859275 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d9537ae-09b5-49d5-a0e5-6d8e6e992170-operator-scripts\") pod \"6d9537ae-09b5-49d5-a0e5-6d8e6e992170\" (UID: \"6d9537ae-09b5-49d5-a0e5-6d8e6e992170\") " Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.859384 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vj6m9\" (UniqueName: \"kubernetes.io/projected/8f644703-222c-4d96-8473-8856ee25fb91-kube-api-access-vj6m9\") pod \"8f644703-222c-4d96-8473-8856ee25fb91\" (UID: \"8f644703-222c-4d96-8473-8856ee25fb91\") " Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.859421 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c22tz\" (UniqueName: \"kubernetes.io/projected/4f408368-233a-4ada-86e7-d4125b2a1bb2-kube-api-access-c22tz\") pod \"4f408368-233a-4ada-86e7-d4125b2a1bb2\" (UID: \"4f408368-233a-4ada-86e7-d4125b2a1bb2\") " Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.860118 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6d9537ae-09b5-49d5-a0e5-6d8e6e992170-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6d9537ae-09b5-49d5-a0e5-6d8e6e992170" (UID: "6d9537ae-09b5-49d5-a0e5-6d8e6e992170"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.860301 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f644703-222c-4d96-8473-8856ee25fb91-operator-scripts\") pod \"8f644703-222c-4d96-8473-8856ee25fb91\" (UID: \"8f644703-222c-4d96-8473-8856ee25fb91\") " Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.860403 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2jfjq\" (UniqueName: \"kubernetes.io/projected/6d9537ae-09b5-49d5-a0e5-6d8e6e992170-kube-api-access-2jfjq\") pod \"6d9537ae-09b5-49d5-a0e5-6d8e6e992170\" (UID: \"6d9537ae-09b5-49d5-a0e5-6d8e6e992170\") " Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.860465 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f408368-233a-4ada-86e7-d4125b2a1bb2-operator-scripts\") pod \"4f408368-233a-4ada-86e7-d4125b2a1bb2\" (UID: \"4f408368-233a-4ada-86e7-d4125b2a1bb2\") " Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.860841 4706 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6d9537ae-09b5-49d5-a0e5-6d8e6e992170-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.860915 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f644703-222c-4d96-8473-8856ee25fb91-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8f644703-222c-4d96-8473-8856ee25fb91" (UID: "8f644703-222c-4d96-8473-8856ee25fb91"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.861063 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4f408368-233a-4ada-86e7-d4125b2a1bb2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4f408368-233a-4ada-86e7-d4125b2a1bb2" (UID: "4f408368-233a-4ada-86e7-d4125b2a1bb2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.867289 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f408368-233a-4ada-86e7-d4125b2a1bb2-kube-api-access-c22tz" (OuterVolumeSpecName: "kube-api-access-c22tz") pod "4f408368-233a-4ada-86e7-d4125b2a1bb2" (UID: "4f408368-233a-4ada-86e7-d4125b2a1bb2"). InnerVolumeSpecName "kube-api-access-c22tz". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.867337 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d9537ae-09b5-49d5-a0e5-6d8e6e992170-kube-api-access-2jfjq" (OuterVolumeSpecName: "kube-api-access-2jfjq") pod "6d9537ae-09b5-49d5-a0e5-6d8e6e992170" (UID: "6d9537ae-09b5-49d5-a0e5-6d8e6e992170"). InnerVolumeSpecName "kube-api-access-2jfjq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.867354 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f644703-222c-4d96-8473-8856ee25fb91-kube-api-access-vj6m9" (OuterVolumeSpecName: "kube-api-access-vj6m9") pod "8f644703-222c-4d96-8473-8856ee25fb91" (UID: "8f644703-222c-4d96-8473-8856ee25fb91"). InnerVolumeSpecName "kube-api-access-vj6m9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.957558 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-ks7jq"] Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.961855 4706 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4f408368-233a-4ada-86e7-d4125b2a1bb2-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.961880 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vj6m9\" (UniqueName: \"kubernetes.io/projected/8f644703-222c-4d96-8473-8856ee25fb91-kube-api-access-vj6m9\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.961890 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c22tz\" (UniqueName: \"kubernetes.io/projected/4f408368-233a-4ada-86e7-d4125b2a1bb2-kube-api-access-c22tz\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.961899 4706 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8f644703-222c-4d96-8473-8856ee25fb91-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:04 crc kubenswrapper[4706]: I1206 05:45:04.961907 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2jfjq\" (UniqueName: \"kubernetes.io/projected/6d9537ae-09b5-49d5-a0e5-6d8e6e992170-kube-api-access-2jfjq\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.065278 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-bd4e-account-create-update-g6ws8"] Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.096252 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-bd4e-account-create-update-g6ws8" event={"ID":"5e0f3a5c-0b70-43c6-9b1e-5729f502488e","Type":"ContainerStarted","Data":"f3dfcb499fe7701f7910e53545fc8f91d208144438a0e781ade5f84c5cebe631"} Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.097640 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-ks7jq" event={"ID":"336ef851-faf5-4c3d-ad1f-316af4dedd9a","Type":"ContainerStarted","Data":"31f430e60f569adf4152e3cfba55f1e75880740ba44eb9ee0b24b04750423a4c"} Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.098981 4706 generic.go:334] "Generic (PLEG): container finished" podID="6ebc347a-c311-41c2-bed4-0fcd22e26342" containerID="b9cde94861c04f85797115def145b09aee430cb3659742748e6d0636b337746c" exitCode=0 Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.099066 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-tnbqb" event={"ID":"6ebc347a-c311-41c2-bed4-0fcd22e26342","Type":"ContainerDied","Data":"b9cde94861c04f85797115def145b09aee430cb3659742748e6d0636b337746c"} Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.099112 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-tnbqb" event={"ID":"6ebc347a-c311-41c2-bed4-0fcd22e26342","Type":"ContainerStarted","Data":"6373c7b356854a1bb49cb22316974cc91aa16547e80e681a3aec634695d8a299"} Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.100838 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-nwqqh" 
event={"ID":"4f408368-233a-4ada-86e7-d4125b2a1bb2","Type":"ContainerDied","Data":"92eca72a6269d427410a9e521cde2154a1f2932040c811ff740c29ae06d0104b"} Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.100958 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="92eca72a6269d427410a9e521cde2154a1f2932040c811ff740c29ae06d0104b" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.100863 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-nwqqh" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.107516 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ab7c-account-create-update-knbmr" event={"ID":"c1854690-b0e0-4052-95a6-6951260cdb0b","Type":"ContainerStarted","Data":"40400c374beb6242eb0b519fbd1c5e73b581aabc02097a6118d3beebc0532375"} Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.107550 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ab7c-account-create-update-knbmr" event={"ID":"c1854690-b0e0-4052-95a6-6951260cdb0b","Type":"ContainerStarted","Data":"1894056fe4e023f6a1a99ddf982e82cdd4522a5624bd1d26398daf2bca94eaa7"} Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.111562 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-2nftm" event={"ID":"8f644703-222c-4d96-8473-8856ee25fb91","Type":"ContainerDied","Data":"71ca17a51d52450a64114ff49713dab220b03677ef6a3111ae35deb0e9c091c3"} Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.111592 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="71ca17a51d52450a64114ff49713dab220b03677ef6a3111ae35deb0e9c091c3" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.111660 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-2nftm" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.116157 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-g9s87" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.118984 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-g9s87" event={"ID":"6d9537ae-09b5-49d5-a0e5-6d8e6e992170","Type":"ContainerDied","Data":"a3ad58e773e78026f4ce774fa03701e0ad660cb56fd79e170804eedd9cf8cc9e"} Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.119034 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a3ad58e773e78026f4ce774fa03701e0ad660cb56fd79e170804eedd9cf8cc9e" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.527829 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-245b-account-create-update-fr24l" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.536015 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-f39c-account-create-update-xpw6m" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.541925 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-3a0e-account-create-update-ctxbc" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.578707 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bsm7t\" (UniqueName: \"kubernetes.io/projected/65da32e7-79d5-4b2a-937c-8890711c77f4-kube-api-access-bsm7t\") pod \"65da32e7-79d5-4b2a-937c-8890711c77f4\" (UID: \"65da32e7-79d5-4b2a-937c-8890711c77f4\") " Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.579245 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65da32e7-79d5-4b2a-937c-8890711c77f4-operator-scripts\") pod \"65da32e7-79d5-4b2a-937c-8890711c77f4\" (UID: \"65da32e7-79d5-4b2a-937c-8890711c77f4\") " Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.581499 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65da32e7-79d5-4b2a-937c-8890711c77f4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "65da32e7-79d5-4b2a-937c-8890711c77f4" (UID: "65da32e7-79d5-4b2a-937c-8890711c77f4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.583771 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65da32e7-79d5-4b2a-937c-8890711c77f4-kube-api-access-bsm7t" (OuterVolumeSpecName: "kube-api-access-bsm7t") pod "65da32e7-79d5-4b2a-937c-8890711c77f4" (UID: "65da32e7-79d5-4b2a-937c-8890711c77f4"). InnerVolumeSpecName "kube-api-access-bsm7t". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.681187 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z87fb\" (UniqueName: \"kubernetes.io/projected/ed44107e-4d92-4895-961b-4cce9234319c-kube-api-access-z87fb\") pod \"ed44107e-4d92-4895-961b-4cce9234319c\" (UID: \"ed44107e-4d92-4895-961b-4cce9234319c\") " Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.681301 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed44107e-4d92-4895-961b-4cce9234319c-operator-scripts\") pod \"ed44107e-4d92-4895-961b-4cce9234319c\" (UID: \"ed44107e-4d92-4895-961b-4cce9234319c\") " Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.681335 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wfw9p\" (UniqueName: \"kubernetes.io/projected/e8b901a2-272b-4021-b8fc-f2e0051e68ce-kube-api-access-wfw9p\") pod \"e8b901a2-272b-4021-b8fc-f2e0051e68ce\" (UID: \"e8b901a2-272b-4021-b8fc-f2e0051e68ce\") " Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.681411 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8b901a2-272b-4021-b8fc-f2e0051e68ce-operator-scripts\") pod \"e8b901a2-272b-4021-b8fc-f2e0051e68ce\" (UID: \"e8b901a2-272b-4021-b8fc-f2e0051e68ce\") " Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.681708 4706 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/65da32e7-79d5-4b2a-937c-8890711c77f4-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.681727 4706 reconciler_common.go:293] "Volume detached 
for volume \"kube-api-access-bsm7t\" (UniqueName: \"kubernetes.io/projected/65da32e7-79d5-4b2a-937c-8890711c77f4-kube-api-access-bsm7t\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.682412 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8b901a2-272b-4021-b8fc-f2e0051e68ce-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e8b901a2-272b-4021-b8fc-f2e0051e68ce" (UID: "e8b901a2-272b-4021-b8fc-f2e0051e68ce"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.684805 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8b901a2-272b-4021-b8fc-f2e0051e68ce-kube-api-access-wfw9p" (OuterVolumeSpecName: "kube-api-access-wfw9p") pod "e8b901a2-272b-4021-b8fc-f2e0051e68ce" (UID: "e8b901a2-272b-4021-b8fc-f2e0051e68ce"). InnerVolumeSpecName "kube-api-access-wfw9p". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.756914 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed44107e-4d92-4895-961b-4cce9234319c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ed44107e-4d92-4895-961b-4cce9234319c" (UID: "ed44107e-4d92-4895-961b-4cce9234319c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.759327 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed44107e-4d92-4895-961b-4cce9234319c-kube-api-access-z87fb" (OuterVolumeSpecName: "kube-api-access-z87fb") pod "ed44107e-4d92-4895-961b-4cce9234319c" (UID: "ed44107e-4d92-4895-961b-4cce9234319c"). InnerVolumeSpecName "kube-api-access-z87fb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.782719 4706 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ed44107e-4d92-4895-961b-4cce9234319c-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.782785 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wfw9p\" (UniqueName: \"kubernetes.io/projected/e8b901a2-272b-4021-b8fc-f2e0051e68ce-kube-api-access-wfw9p\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.782797 4706 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8b901a2-272b-4021-b8fc-f2e0051e68ce-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:05 crc kubenswrapper[4706]: I1206 05:45:05.782807 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z87fb\" (UniqueName: \"kubernetes.io/projected/ed44107e-4d92-4895-961b-4cce9234319c-kube-api-access-z87fb\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.125521 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-245b-account-create-update-fr24l" Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.125522 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-245b-account-create-update-fr24l" event={"ID":"65da32e7-79d5-4b2a-937c-8890711c77f4","Type":"ContainerDied","Data":"e5cb9a59c4f095154ff48e0bf5c2f402f8bd5786fc47ab47bd4dcb948a30faa0"} Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.125649 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e5cb9a59c4f095154ff48e0bf5c2f402f8bd5786fc47ab47bd4dcb948a30faa0" Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.128314 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f39c-account-create-update-xpw6m" event={"ID":"e8b901a2-272b-4021-b8fc-f2e0051e68ce","Type":"ContainerDied","Data":"0f973009b6201e0626131e0ccc58fd434007214af151d3943d138351d3d1366f"} Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.128353 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f973009b6201e0626131e0ccc58fd434007214af151d3943d138351d3d1366f" Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.128368 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-f39c-account-create-update-xpw6m" Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.130829 4706 generic.go:334] "Generic (PLEG): container finished" podID="5e0f3a5c-0b70-43c6-9b1e-5729f502488e" containerID="9b0529fd61421d18bdbda5094f00317f5ecb5173b66ff1211f913dd36fa63b73" exitCode=0 Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.130916 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-bd4e-account-create-update-g6ws8" event={"ID":"5e0f3a5c-0b70-43c6-9b1e-5729f502488e","Type":"ContainerDied","Data":"9b0529fd61421d18bdbda5094f00317f5ecb5173b66ff1211f913dd36fa63b73"} Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.132703 4706 generic.go:334] "Generic (PLEG): container finished" podID="336ef851-faf5-4c3d-ad1f-316af4dedd9a" containerID="31c8e56e7f09c4baba93092443668f315c5ddfad6153dc36d306f07a63b30c43" exitCode=0 Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.132769 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-ks7jq" event={"ID":"336ef851-faf5-4c3d-ad1f-316af4dedd9a","Type":"ContainerDied","Data":"31c8e56e7f09c4baba93092443668f315c5ddfad6153dc36d306f07a63b30c43"} Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.134387 4706 generic.go:334] "Generic (PLEG): container finished" podID="c1854690-b0e0-4052-95a6-6951260cdb0b" containerID="40400c374beb6242eb0b519fbd1c5e73b581aabc02097a6118d3beebc0532375" exitCode=0 Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.134466 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ab7c-account-create-update-knbmr" event={"ID":"c1854690-b0e0-4052-95a6-6951260cdb0b","Type":"ContainerDied","Data":"40400c374beb6242eb0b519fbd1c5e73b581aabc02097a6118d3beebc0532375"} Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.140877 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-3a0e-account-create-update-ctxbc" event={"ID":"ed44107e-4d92-4895-961b-4cce9234319c","Type":"ContainerDied","Data":"61c210f4eb72d1d08d0a762ad32e3fd55af736a4eff30dc45bf36d3ac948703e"} Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.140937 4706 pod_container_deletor.go:80] "Container not found in pod's 
containers" containerID="61c210f4eb72d1d08d0a762ad32e3fd55af736a4eff30dc45bf36d3ac948703e" Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.141021 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-3a0e-account-create-update-ctxbc" Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.586790 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-ab7c-account-create-update-knbmr" Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.600427 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-tnbqb" Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.697195 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xlvct\" (UniqueName: \"kubernetes.io/projected/6ebc347a-c311-41c2-bed4-0fcd22e26342-kube-api-access-xlvct\") pod \"6ebc347a-c311-41c2-bed4-0fcd22e26342\" (UID: \"6ebc347a-c311-41c2-bed4-0fcd22e26342\") " Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.697231 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-88xm4\" (UniqueName: \"kubernetes.io/projected/c1854690-b0e0-4052-95a6-6951260cdb0b-kube-api-access-88xm4\") pod \"c1854690-b0e0-4052-95a6-6951260cdb0b\" (UID: \"c1854690-b0e0-4052-95a6-6951260cdb0b\") " Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.697265 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1854690-b0e0-4052-95a6-6951260cdb0b-operator-scripts\") pod \"c1854690-b0e0-4052-95a6-6951260cdb0b\" (UID: \"c1854690-b0e0-4052-95a6-6951260cdb0b\") " Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.697406 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6ebc347a-c311-41c2-bed4-0fcd22e26342-operator-scripts\") pod \"6ebc347a-c311-41c2-bed4-0fcd22e26342\" (UID: \"6ebc347a-c311-41c2-bed4-0fcd22e26342\") " Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.698019 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1854690-b0e0-4052-95a6-6951260cdb0b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c1854690-b0e0-4052-95a6-6951260cdb0b" (UID: "c1854690-b0e0-4052-95a6-6951260cdb0b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.698173 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ebc347a-c311-41c2-bed4-0fcd22e26342-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6ebc347a-c311-41c2-bed4-0fcd22e26342" (UID: "6ebc347a-c311-41c2-bed4-0fcd22e26342"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.698312 4706 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6ebc347a-c311-41c2-bed4-0fcd22e26342-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.698332 4706 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c1854690-b0e0-4052-95a6-6951260cdb0b-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.702407 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ebc347a-c311-41c2-bed4-0fcd22e26342-kube-api-access-xlvct" (OuterVolumeSpecName: "kube-api-access-xlvct") pod "6ebc347a-c311-41c2-bed4-0fcd22e26342" (UID: "6ebc347a-c311-41c2-bed4-0fcd22e26342"). InnerVolumeSpecName "kube-api-access-xlvct". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.703389 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1854690-b0e0-4052-95a6-6951260cdb0b-kube-api-access-88xm4" (OuterVolumeSpecName: "kube-api-access-88xm4") pod "c1854690-b0e0-4052-95a6-6951260cdb0b" (UID: "c1854690-b0e0-4052-95a6-6951260cdb0b"). InnerVolumeSpecName "kube-api-access-88xm4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.800083 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xlvct\" (UniqueName: \"kubernetes.io/projected/6ebc347a-c311-41c2-bed4-0fcd22e26342-kube-api-access-xlvct\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:06 crc kubenswrapper[4706]: I1206 05:45:06.800121 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-88xm4\" (UniqueName: \"kubernetes.io/projected/c1854690-b0e0-4052-95a6-6951260cdb0b-kube-api-access-88xm4\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.150597 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-ab7c-account-create-update-knbmr" event={"ID":"c1854690-b0e0-4052-95a6-6951260cdb0b","Type":"ContainerDied","Data":"1894056fe4e023f6a1a99ddf982e82cdd4522a5624bd1d26398daf2bca94eaa7"} Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.150676 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1894056fe4e023f6a1a99ddf982e82cdd4522a5624bd1d26398daf2bca94eaa7" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.150634 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-ab7c-account-create-update-knbmr" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.152186 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-tnbqb" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.156089 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-tnbqb" event={"ID":"6ebc347a-c311-41c2-bed4-0fcd22e26342","Type":"ContainerDied","Data":"6373c7b356854a1bb49cb22316974cc91aa16547e80e681a3aec634695d8a299"} Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.156155 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6373c7b356854a1bb49cb22316974cc91aa16547e80e681a3aec634695d8a299" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.510715 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-bd4e-account-create-update-g6ws8" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.518091 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-ks7jq" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.606691 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jnhxj"] Dec 06 05:45:07 crc kubenswrapper[4706]: E1206 05:45:07.607473 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f644703-222c-4d96-8473-8856ee25fb91" containerName="mariadb-database-create" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.607493 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f644703-222c-4d96-8473-8856ee25fb91" containerName="mariadb-database-create" Dec 06 05:45:07 crc kubenswrapper[4706]: E1206 05:45:07.607502 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d9537ae-09b5-49d5-a0e5-6d8e6e992170" containerName="mariadb-database-create" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.607508 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d9537ae-09b5-49d5-a0e5-6d8e6e992170" containerName="mariadb-database-create" Dec 06 05:45:07 crc kubenswrapper[4706]: E1206 05:45:07.607523 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8b901a2-272b-4021-b8fc-f2e0051e68ce" containerName="mariadb-account-create-update" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.607533 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8b901a2-272b-4021-b8fc-f2e0051e68ce" containerName="mariadb-account-create-update" Dec 06 05:45:07 crc kubenswrapper[4706]: E1206 05:45:07.607548 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e0f3a5c-0b70-43c6-9b1e-5729f502488e" containerName="mariadb-account-create-update" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.607555 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e0f3a5c-0b70-43c6-9b1e-5729f502488e" containerName="mariadb-account-create-update" Dec 06 05:45:07 crc kubenswrapper[4706]: E1206 05:45:07.607566 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ebc347a-c311-41c2-bed4-0fcd22e26342" containerName="mariadb-database-create" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.607572 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ebc347a-c311-41c2-bed4-0fcd22e26342" containerName="mariadb-database-create" Dec 06 05:45:07 crc kubenswrapper[4706]: E1206 05:45:07.607581 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f408368-233a-4ada-86e7-d4125b2a1bb2" containerName="mariadb-database-create" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.607587 4706 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="4f408368-233a-4ada-86e7-d4125b2a1bb2" containerName="mariadb-database-create" Dec 06 05:45:07 crc kubenswrapper[4706]: E1206 05:45:07.607605 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed44107e-4d92-4895-961b-4cce9234319c" containerName="mariadb-account-create-update" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.607611 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed44107e-4d92-4895-961b-4cce9234319c" containerName="mariadb-account-create-update" Dec 06 05:45:07 crc kubenswrapper[4706]: E1206 05:45:07.607620 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65da32e7-79d5-4b2a-937c-8890711c77f4" containerName="mariadb-account-create-update" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.607626 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="65da32e7-79d5-4b2a-937c-8890711c77f4" containerName="mariadb-account-create-update" Dec 06 05:45:07 crc kubenswrapper[4706]: E1206 05:45:07.607635 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="336ef851-faf5-4c3d-ad1f-316af4dedd9a" containerName="mariadb-database-create" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.607642 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="336ef851-faf5-4c3d-ad1f-316af4dedd9a" containerName="mariadb-database-create" Dec 06 05:45:07 crc kubenswrapper[4706]: E1206 05:45:07.607652 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1854690-b0e0-4052-95a6-6951260cdb0b" containerName="mariadb-account-create-update" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.607657 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1854690-b0e0-4052-95a6-6951260cdb0b" containerName="mariadb-account-create-update" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.607799 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1854690-b0e0-4052-95a6-6951260cdb0b" containerName="mariadb-account-create-update" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.607838 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f408368-233a-4ada-86e7-d4125b2a1bb2" containerName="mariadb-database-create" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.607853 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed44107e-4d92-4895-961b-4cce9234319c" containerName="mariadb-account-create-update" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.607868 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8b901a2-272b-4021-b8fc-f2e0051e68ce" containerName="mariadb-account-create-update" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.607882 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="336ef851-faf5-4c3d-ad1f-316af4dedd9a" containerName="mariadb-database-create" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.607891 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ebc347a-c311-41c2-bed4-0fcd22e26342" containerName="mariadb-database-create" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.607906 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e0f3a5c-0b70-43c6-9b1e-5729f502488e" containerName="mariadb-account-create-update" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.607915 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="65da32e7-79d5-4b2a-937c-8890711c77f4" containerName="mariadb-account-create-update" Dec 06 05:45:07 crc kubenswrapper[4706]: 
I1206 05:45:07.607926 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="6d9537ae-09b5-49d5-a0e5-6d8e6e992170" containerName="mariadb-database-create" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.607937 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f644703-222c-4d96-8473-8856ee25fb91" containerName="mariadb-database-create" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.609116 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jnhxj" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.610259 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/336ef851-faf5-4c3d-ad1f-316af4dedd9a-operator-scripts\") pod \"336ef851-faf5-4c3d-ad1f-316af4dedd9a\" (UID: \"336ef851-faf5-4c3d-ad1f-316af4dedd9a\") " Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.610359 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2q77t\" (UniqueName: \"kubernetes.io/projected/5e0f3a5c-0b70-43c6-9b1e-5729f502488e-kube-api-access-2q77t\") pod \"5e0f3a5c-0b70-43c6-9b1e-5729f502488e\" (UID: \"5e0f3a5c-0b70-43c6-9b1e-5729f502488e\") " Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.610491 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7xxn6\" (UniqueName: \"kubernetes.io/projected/336ef851-faf5-4c3d-ad1f-316af4dedd9a-kube-api-access-7xxn6\") pod \"336ef851-faf5-4c3d-ad1f-316af4dedd9a\" (UID: \"336ef851-faf5-4c3d-ad1f-316af4dedd9a\") " Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.610586 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e0f3a5c-0b70-43c6-9b1e-5729f502488e-operator-scripts\") pod \"5e0f3a5c-0b70-43c6-9b1e-5729f502488e\" (UID: \"5e0f3a5c-0b70-43c6-9b1e-5729f502488e\") " Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.611021 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/336ef851-faf5-4c3d-ad1f-316af4dedd9a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "336ef851-faf5-4c3d-ad1f-316af4dedd9a" (UID: "336ef851-faf5-4c3d-ad1f-316af4dedd9a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.611606 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5e0f3a5c-0b70-43c6-9b1e-5729f502488e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5e0f3a5c-0b70-43c6-9b1e-5729f502488e" (UID: "5e0f3a5c-0b70-43c6-9b1e-5729f502488e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.612923 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jnhxj"] Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.623353 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e0f3a5c-0b70-43c6-9b1e-5729f502488e-kube-api-access-2q77t" (OuterVolumeSpecName: "kube-api-access-2q77t") pod "5e0f3a5c-0b70-43c6-9b1e-5729f502488e" (UID: "5e0f3a5c-0b70-43c6-9b1e-5729f502488e"). InnerVolumeSpecName "kube-api-access-2q77t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.633864 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/336ef851-faf5-4c3d-ad1f-316af4dedd9a-kube-api-access-7xxn6" (OuterVolumeSpecName: "kube-api-access-7xxn6") pod "336ef851-faf5-4c3d-ad1f-316af4dedd9a" (UID: "336ef851-faf5-4c3d-ad1f-316af4dedd9a"). InnerVolumeSpecName "kube-api-access-7xxn6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.712627 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9326561b-a184-41e6-8c1e-4af6c493619d-utilities\") pod \"community-operators-jnhxj\" (UID: \"9326561b-a184-41e6-8c1e-4af6c493619d\") " pod="openshift-marketplace/community-operators-jnhxj" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.712738 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lj4hh\" (UniqueName: \"kubernetes.io/projected/9326561b-a184-41e6-8c1e-4af6c493619d-kube-api-access-lj4hh\") pod \"community-operators-jnhxj\" (UID: \"9326561b-a184-41e6-8c1e-4af6c493619d\") " pod="openshift-marketplace/community-operators-jnhxj" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.712863 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9326561b-a184-41e6-8c1e-4af6c493619d-catalog-content\") pod \"community-operators-jnhxj\" (UID: \"9326561b-a184-41e6-8c1e-4af6c493619d\") " pod="openshift-marketplace/community-operators-jnhxj" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.713227 4706 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5e0f3a5c-0b70-43c6-9b1e-5729f502488e-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.713261 4706 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/336ef851-faf5-4c3d-ad1f-316af4dedd9a-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.713271 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2q77t\" (UniqueName: \"kubernetes.io/projected/5e0f3a5c-0b70-43c6-9b1e-5729f502488e-kube-api-access-2q77t\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.713284 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7xxn6\" (UniqueName: \"kubernetes.io/projected/336ef851-faf5-4c3d-ad1f-316af4dedd9a-kube-api-access-7xxn6\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.814992 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9326561b-a184-41e6-8c1e-4af6c493619d-utilities\") pod \"community-operators-jnhxj\" (UID: \"9326561b-a184-41e6-8c1e-4af6c493619d\") " pod="openshift-marketplace/community-operators-jnhxj" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.815086 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lj4hh\" (UniqueName: \"kubernetes.io/projected/9326561b-a184-41e6-8c1e-4af6c493619d-kube-api-access-lj4hh\") pod 
\"community-operators-jnhxj\" (UID: \"9326561b-a184-41e6-8c1e-4af6c493619d\") " pod="openshift-marketplace/community-operators-jnhxj" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.815129 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9326561b-a184-41e6-8c1e-4af6c493619d-catalog-content\") pod \"community-operators-jnhxj\" (UID: \"9326561b-a184-41e6-8c1e-4af6c493619d\") " pod="openshift-marketplace/community-operators-jnhxj" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.815571 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9326561b-a184-41e6-8c1e-4af6c493619d-utilities\") pod \"community-operators-jnhxj\" (UID: \"9326561b-a184-41e6-8c1e-4af6c493619d\") " pod="openshift-marketplace/community-operators-jnhxj" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.815578 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9326561b-a184-41e6-8c1e-4af6c493619d-catalog-content\") pod \"community-operators-jnhxj\" (UID: \"9326561b-a184-41e6-8c1e-4af6c493619d\") " pod="openshift-marketplace/community-operators-jnhxj" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.832861 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lj4hh\" (UniqueName: \"kubernetes.io/projected/9326561b-a184-41e6-8c1e-4af6c493619d-kube-api-access-lj4hh\") pod \"community-operators-jnhxj\" (UID: \"9326561b-a184-41e6-8c1e-4af6c493619d\") " pod="openshift-marketplace/community-operators-jnhxj" Dec 06 05:45:07 crc kubenswrapper[4706]: I1206 05:45:07.972288 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jnhxj" Dec 06 05:45:08 crc kubenswrapper[4706]: I1206 05:45:08.160352 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-bd4e-account-create-update-g6ws8" Dec 06 05:45:08 crc kubenswrapper[4706]: I1206 05:45:08.160359 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-bd4e-account-create-update-g6ws8" event={"ID":"5e0f3a5c-0b70-43c6-9b1e-5729f502488e","Type":"ContainerDied","Data":"f3dfcb499fe7701f7910e53545fc8f91d208144438a0e781ade5f84c5cebe631"} Dec 06 05:45:08 crc kubenswrapper[4706]: I1206 05:45:08.160399 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f3dfcb499fe7701f7910e53545fc8f91d208144438a0e781ade5f84c5cebe631" Dec 06 05:45:08 crc kubenswrapper[4706]: I1206 05:45:08.167285 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-ks7jq" event={"ID":"336ef851-faf5-4c3d-ad1f-316af4dedd9a","Type":"ContainerDied","Data":"31f430e60f569adf4152e3cfba55f1e75880740ba44eb9ee0b24b04750423a4c"} Dec 06 05:45:08 crc kubenswrapper[4706]: I1206 05:45:08.167333 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="31f430e60f569adf4152e3cfba55f1e75880740ba44eb9ee0b24b04750423a4c" Dec 06 05:45:08 crc kubenswrapper[4706]: I1206 05:45:08.167342 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-ks7jq" Dec 06 05:45:08 crc kubenswrapper[4706]: I1206 05:45:08.532310 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jnhxj"] Dec 06 05:45:08 crc kubenswrapper[4706]: W1206 05:45:08.536330 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9326561b_a184_41e6_8c1e_4af6c493619d.slice/crio-54b83b3ae559c9de4b446dea565bd8ce9274db862c39f135fbcb4d46fd419c10 WatchSource:0}: Error finding container 54b83b3ae559c9de4b446dea565bd8ce9274db862c39f135fbcb4d46fd419c10: Status 404 returned error can't find the container with id 54b83b3ae559c9de4b446dea565bd8ce9274db862c39f135fbcb4d46fd419c10 Dec 06 05:45:08 crc kubenswrapper[4706]: I1206 05:45:08.932508 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0" Dec 06 05:45:08 crc kubenswrapper[4706]: I1206 05:45:08.938980 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/91f74906-ec70-4b0c-a657-d075d18f488b-etc-swift\") pod \"swift-storage-0\" (UID: \"91f74906-ec70-4b0c-a657-d075d18f488b\") " pod="openstack/swift-storage-0" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.084519 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.207264 4706 generic.go:334] "Generic (PLEG): container finished" podID="9326561b-a184-41e6-8c1e-4af6c493619d" containerID="f459b1cf5ceaab3e193dc35773626fe46fc903e1250adb418b0dd6e6859c5524" exitCode=0 Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.207564 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jnhxj" event={"ID":"9326561b-a184-41e6-8c1e-4af6c493619d","Type":"ContainerDied","Data":"f459b1cf5ceaab3e193dc35773626fe46fc903e1250adb418b0dd6e6859c5524"} Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.207641 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jnhxj" event={"ID":"9326561b-a184-41e6-8c1e-4af6c493619d","Type":"ContainerStarted","Data":"54b83b3ae559c9de4b446dea565bd8ce9274db862c39f135fbcb4d46fd419c10"} Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.255909 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-97dzf"] Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.257721 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-97dzf" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.264603 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-97dzf"] Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.264790 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.265229 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.265390 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-jsz48" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.270869 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.325601 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-pjt6m"] Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.326768 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-pjt6m" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.338850 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406-combined-ca-bundle\") pod \"keystone-db-sync-97dzf\" (UID: \"e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406\") " pod="openstack/keystone-db-sync-97dzf" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.338877 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lz85s\" (UniqueName: \"kubernetes.io/projected/e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406-kube-api-access-lz85s\") pod \"keystone-db-sync-97dzf\" (UID: \"e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406\") " pod="openstack/keystone-db-sync-97dzf" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.338959 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406-config-data\") pod \"keystone-db-sync-97dzf\" (UID: \"e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406\") " pod="openstack/keystone-db-sync-97dzf" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.341413 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-mw9mc" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.341542 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.383375 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-pjt6m"] Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.440571 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bc067583-4394-4fa3-86fc-d6e626ec0f18-db-sync-config-data\") pod \"glance-db-sync-pjt6m\" (UID: \"bc067583-4394-4fa3-86fc-d6e626ec0f18\") " pod="openstack/glance-db-sync-pjt6m" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.440637 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406-combined-ca-bundle\") pod \"keystone-db-sync-97dzf\" (UID: \"e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406\") " pod="openstack/keystone-db-sync-97dzf" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.440660 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lz85s\" (UniqueName: \"kubernetes.io/projected/e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406-kube-api-access-lz85s\") pod \"keystone-db-sync-97dzf\" (UID: \"e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406\") " pod="openstack/keystone-db-sync-97dzf" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.440734 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc067583-4394-4fa3-86fc-d6e626ec0f18-combined-ca-bundle\") pod \"glance-db-sync-pjt6m\" (UID: \"bc067583-4394-4fa3-86fc-d6e626ec0f18\") " pod="openstack/glance-db-sync-pjt6m" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.440776 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s57cs\" (UniqueName: \"kubernetes.io/projected/bc067583-4394-4fa3-86fc-d6e626ec0f18-kube-api-access-s57cs\") pod \"glance-db-sync-pjt6m\" (UID: \"bc067583-4394-4fa3-86fc-d6e626ec0f18\") " pod="openstack/glance-db-sync-pjt6m" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.440804 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc067583-4394-4fa3-86fc-d6e626ec0f18-config-data\") pod \"glance-db-sync-pjt6m\" (UID: \"bc067583-4394-4fa3-86fc-d6e626ec0f18\") " pod="openstack/glance-db-sync-pjt6m" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.440978 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406-config-data\") pod \"keystone-db-sync-97dzf\" (UID: \"e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406\") " pod="openstack/keystone-db-sync-97dzf" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.445783 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406-combined-ca-bundle\") pod \"keystone-db-sync-97dzf\" (UID: \"e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406\") " pod="openstack/keystone-db-sync-97dzf" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.445930 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406-config-data\") pod \"keystone-db-sync-97dzf\" (UID: \"e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406\") " pod="openstack/keystone-db-sync-97dzf" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.456319 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lz85s\" (UniqueName: \"kubernetes.io/projected/e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406-kube-api-access-lz85s\") pod \"keystone-db-sync-97dzf\" (UID: \"e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406\") " pod="openstack/keystone-db-sync-97dzf" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.518334 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Dec 06 05:45:09 crc kubenswrapper[4706]: W1206 05:45:09.520089 4706 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod91f74906_ec70_4b0c_a657_d075d18f488b.slice/crio-4aad3cb84e3f1b83b86fe6f06ac132bc997f7ae35d24c4080a4f8ec590922200 WatchSource:0}: Error finding container 4aad3cb84e3f1b83b86fe6f06ac132bc997f7ae35d24c4080a4f8ec590922200: Status 404 returned error can't find the container with id 4aad3cb84e3f1b83b86fe6f06ac132bc997f7ae35d24c4080a4f8ec590922200 Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.542896 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc067583-4394-4fa3-86fc-d6e626ec0f18-combined-ca-bundle\") pod \"glance-db-sync-pjt6m\" (UID: \"bc067583-4394-4fa3-86fc-d6e626ec0f18\") " pod="openstack/glance-db-sync-pjt6m" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.543129 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s57cs\" (UniqueName: \"kubernetes.io/projected/bc067583-4394-4fa3-86fc-d6e626ec0f18-kube-api-access-s57cs\") pod \"glance-db-sync-pjt6m\" (UID: \"bc067583-4394-4fa3-86fc-d6e626ec0f18\") " pod="openstack/glance-db-sync-pjt6m" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.543190 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc067583-4394-4fa3-86fc-d6e626ec0f18-config-data\") pod \"glance-db-sync-pjt6m\" (UID: \"bc067583-4394-4fa3-86fc-d6e626ec0f18\") " pod="openstack/glance-db-sync-pjt6m" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.543270 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bc067583-4394-4fa3-86fc-d6e626ec0f18-db-sync-config-data\") pod \"glance-db-sync-pjt6m\" (UID: \"bc067583-4394-4fa3-86fc-d6e626ec0f18\") " pod="openstack/glance-db-sync-pjt6m" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.546714 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bc067583-4394-4fa3-86fc-d6e626ec0f18-db-sync-config-data\") pod \"glance-db-sync-pjt6m\" (UID: \"bc067583-4394-4fa3-86fc-d6e626ec0f18\") " pod="openstack/glance-db-sync-pjt6m" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.547777 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc067583-4394-4fa3-86fc-d6e626ec0f18-config-data\") pod \"glance-db-sync-pjt6m\" (UID: \"bc067583-4394-4fa3-86fc-d6e626ec0f18\") " pod="openstack/glance-db-sync-pjt6m" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.547850 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc067583-4394-4fa3-86fc-d6e626ec0f18-combined-ca-bundle\") pod \"glance-db-sync-pjt6m\" (UID: \"bc067583-4394-4fa3-86fc-d6e626ec0f18\") " pod="openstack/glance-db-sync-pjt6m" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.563478 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s57cs\" (UniqueName: \"kubernetes.io/projected/bc067583-4394-4fa3-86fc-d6e626ec0f18-kube-api-access-s57cs\") pod \"glance-db-sync-pjt6m\" (UID: \"bc067583-4394-4fa3-86fc-d6e626ec0f18\") " pod="openstack/glance-db-sync-pjt6m" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.588170 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-97dzf" Dec 06 05:45:09 crc kubenswrapper[4706]: I1206 05:45:09.645314 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-pjt6m" Dec 06 05:45:10 crc kubenswrapper[4706]: I1206 05:45:10.028613 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-97dzf"] Dec 06 05:45:10 crc kubenswrapper[4706]: I1206 05:45:10.215330 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"91f74906-ec70-4b0c-a657-d075d18f488b","Type":"ContainerStarted","Data":"4aad3cb84e3f1b83b86fe6f06ac132bc997f7ae35d24c4080a4f8ec590922200"} Dec 06 05:45:10 crc kubenswrapper[4706]: I1206 05:45:10.219698 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jnhxj" event={"ID":"9326561b-a184-41e6-8c1e-4af6c493619d","Type":"ContainerStarted","Data":"7861502f8677e77362ab9e307f747324f496a340af075c8a36c98a31da3e5ca4"} Dec 06 05:45:10 crc kubenswrapper[4706]: I1206 05:45:10.222620 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-97dzf" event={"ID":"e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406","Type":"ContainerStarted","Data":"937728fbf6c50a19eba65f846129518f54a8cf71e859e96bd92e360235be5993"} Dec 06 05:45:10 crc kubenswrapper[4706]: I1206 05:45:10.341064 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-pjt6m"] Dec 06 05:45:10 crc kubenswrapper[4706]: W1206 05:45:10.341765 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbc067583_4394_4fa3_86fc_d6e626ec0f18.slice/crio-f58590dfbd3033126090481ed225e6ccd32bf33f46517199611ef7161c3aa425 WatchSource:0}: Error finding container f58590dfbd3033126090481ed225e6ccd32bf33f46517199611ef7161c3aa425: Status 404 returned error can't find the container with id f58590dfbd3033126090481ed225e6ccd32bf33f46517199611ef7161c3aa425 Dec 06 05:45:11 crc kubenswrapper[4706]: I1206 05:45:11.231718 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-pjt6m" event={"ID":"bc067583-4394-4fa3-86fc-d6e626ec0f18","Type":"ContainerStarted","Data":"f58590dfbd3033126090481ed225e6ccd32bf33f46517199611ef7161c3aa425"} Dec 06 05:45:12 crc kubenswrapper[4706]: I1206 05:45:12.243014 4706 generic.go:334] "Generic (PLEG): container finished" podID="9326561b-a184-41e6-8c1e-4af6c493619d" containerID="7861502f8677e77362ab9e307f747324f496a340af075c8a36c98a31da3e5ca4" exitCode=0 Dec 06 05:45:12 crc kubenswrapper[4706]: I1206 05:45:12.243108 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jnhxj" event={"ID":"9326561b-a184-41e6-8c1e-4af6c493619d","Type":"ContainerDied","Data":"7861502f8677e77362ab9e307f747324f496a340af075c8a36c98a31da3e5ca4"} Dec 06 05:45:20 crc kubenswrapper[4706]: I1206 05:45:20.850260 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:45:24 crc kubenswrapper[4706]: I1206 05:45:24.436362 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jnhxj" event={"ID":"9326561b-a184-41e6-8c1e-4af6c493619d","Type":"ContainerStarted","Data":"6547fa233ff66ccdb8c5410f538b30dec20e6f375e9b99277fdfbca355c9190e"} Dec 06 05:45:24 crc kubenswrapper[4706]: I1206 05:45:24.439036 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/keystone-db-sync-97dzf" event={"ID":"e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406","Type":"ContainerStarted","Data":"b22222290f9507501338cc9487939041cdd504374d0236cb6493aadca78433e6"} Dec 06 05:45:24 crc kubenswrapper[4706]: I1206 05:45:24.442929 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"91f74906-ec70-4b0c-a657-d075d18f488b","Type":"ContainerStarted","Data":"0065c49444e98cc206e6dabcb215d353dd0c0a3d610f00e358e982a5d3d3802d"} Dec 06 05:45:24 crc kubenswrapper[4706]: I1206 05:45:24.442981 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"91f74906-ec70-4b0c-a657-d075d18f488b","Type":"ContainerStarted","Data":"2d41fc5d2331fe0fe933d7e449ef7b5c9ea99d6425227fe2205a1683a06fd025"} Dec 06 05:45:24 crc kubenswrapper[4706]: I1206 05:45:24.467057 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jnhxj" podStartSLOduration=3.065249195 podStartE2EDuration="17.46701974s" podCreationTimestamp="2025-12-06 05:45:07 +0000 UTC" firstStartedPulling="2025-12-06 05:45:09.209453623 +0000 UTC m=+1531.537277567" lastFinishedPulling="2025-12-06 05:45:23.611224158 +0000 UTC m=+1545.939048112" observedRunningTime="2025-12-06 05:45:24.456242529 +0000 UTC m=+1546.784066493" watchObservedRunningTime="2025-12-06 05:45:24.46701974 +0000 UTC m=+1546.794843694" Dec 06 05:45:24 crc kubenswrapper[4706]: I1206 05:45:24.474507 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-97dzf" podStartSLOduration=1.901352393 podStartE2EDuration="15.474491342s" podCreationTimestamp="2025-12-06 05:45:09 +0000 UTC" firstStartedPulling="2025-12-06 05:45:10.039346954 +0000 UTC m=+1532.367170898" lastFinishedPulling="2025-12-06 05:45:23.612485903 +0000 UTC m=+1545.940309847" observedRunningTime="2025-12-06 05:45:24.471206073 +0000 UTC m=+1546.799030037" watchObservedRunningTime="2025-12-06 05:45:24.474491342 +0000 UTC m=+1546.802315286" Dec 06 05:45:25 crc kubenswrapper[4706]: I1206 05:45:25.491685 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"91f74906-ec70-4b0c-a657-d075d18f488b","Type":"ContainerStarted","Data":"e20ae9a3cc0f68cdc1210da8d1b6090dffb41dab550d559979f94e332103322a"} Dec 06 05:45:25 crc kubenswrapper[4706]: I1206 05:45:25.491956 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"91f74906-ec70-4b0c-a657-d075d18f488b","Type":"ContainerStarted","Data":"9332808cde6a00a90817715b5b1f95b7769c61695ff01d4ec6355409ccba6a5d"} Dec 06 05:45:27 crc kubenswrapper[4706]: I1206 05:45:27.978410 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jnhxj" Dec 06 05:45:27 crc kubenswrapper[4706]: I1206 05:45:27.978794 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jnhxj" Dec 06 05:45:28 crc kubenswrapper[4706]: I1206 05:45:28.056178 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jnhxj" Dec 06 05:45:30 crc kubenswrapper[4706]: I1206 05:45:30.578463 4706 generic.go:334] "Generic (PLEG): container finished" podID="e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406" containerID="b22222290f9507501338cc9487939041cdd504374d0236cb6493aadca78433e6" exitCode=0 Dec 06 05:45:30 crc kubenswrapper[4706]: I1206 05:45:30.578523 4706 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-97dzf" event={"ID":"e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406","Type":"ContainerDied","Data":"b22222290f9507501338cc9487939041cdd504374d0236cb6493aadca78433e6"} Dec 06 05:45:38 crc kubenswrapper[4706]: I1206 05:45:38.023979 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jnhxj" Dec 06 05:45:38 crc kubenswrapper[4706]: I1206 05:45:38.087925 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jnhxj"] Dec 06 05:45:38 crc kubenswrapper[4706]: E1206 05:45:38.593037 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Dec 06 05:45:38 crc kubenswrapper[4706]: E1206 05:45:38.593513 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s57cs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-pjt6m_openstack(bc067583-4394-4fa3-86fc-d6e626ec0f18): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:45:38 crc kubenswrapper[4706]: E1206 05:45:38.594781 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-pjt6m" podUID="bc067583-4394-4fa3-86fc-d6e626ec0f18" Dec 06 
05:45:38 crc kubenswrapper[4706]: I1206 05:45:38.626226 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-97dzf" Dec 06 05:45:38 crc kubenswrapper[4706]: I1206 05:45:38.665640 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-97dzf" Dec 06 05:45:38 crc kubenswrapper[4706]: I1206 05:45:38.665698 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-97dzf" event={"ID":"e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406","Type":"ContainerDied","Data":"937728fbf6c50a19eba65f846129518f54a8cf71e859e96bd92e360235be5993"} Dec 06 05:45:38 crc kubenswrapper[4706]: I1206 05:45:38.665734 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="937728fbf6c50a19eba65f846129518f54a8cf71e859e96bd92e360235be5993" Dec 06 05:45:38 crc kubenswrapper[4706]: I1206 05:45:38.665950 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jnhxj" podUID="9326561b-a184-41e6-8c1e-4af6c493619d" containerName="registry-server" containerID="cri-o://6547fa233ff66ccdb8c5410f538b30dec20e6f375e9b99277fdfbca355c9190e" gracePeriod=2 Dec 06 05:45:38 crc kubenswrapper[4706]: E1206 05:45:38.667293 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-pjt6m" podUID="bc067583-4394-4fa3-86fc-d6e626ec0f18" Dec 06 05:45:38 crc kubenswrapper[4706]: I1206 05:45:38.799582 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406-combined-ca-bundle\") pod \"e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406\" (UID: \"e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406\") " Dec 06 05:45:38 crc kubenswrapper[4706]: I1206 05:45:38.799659 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406-config-data\") pod \"e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406\" (UID: \"e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406\") " Dec 06 05:45:38 crc kubenswrapper[4706]: I1206 05:45:38.799815 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz85s\" (UniqueName: \"kubernetes.io/projected/e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406-kube-api-access-lz85s\") pod \"e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406\" (UID: \"e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406\") " Dec 06 05:45:38 crc kubenswrapper[4706]: I1206 05:45:38.805610 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406-kube-api-access-lz85s" (OuterVolumeSpecName: "kube-api-access-lz85s") pod "e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406" (UID: "e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406"). InnerVolumeSpecName "kube-api-access-lz85s". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:45:38 crc kubenswrapper[4706]: I1206 05:45:38.828701 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406" (UID: "e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:45:38 crc kubenswrapper[4706]: E1206 05:45:38.833563 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-swift-container:current-podified" Dec 06 05:45:38 crc kubenswrapper[4706]: E1206 05:45:38.833743 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:container-server,Image:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,Command:[/usr/bin/swift-container-server /etc/swift/container-server.conf.d -v],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:container,HostPort:0,ContainerPort:6201,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5b7h56h9dh94h67bh697h95h55hbh555h556h675h5fdh57dh579h5fbh64fh5c9h687hb6h678h5d4h549h54h98h8ch564h5bh5bch55dhc8hf8q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:swift,ReadOnly:false,MountPath:/srv/node/pv,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:etc-swift,ReadOnly:false,MountPath:/etc/swift,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cache,ReadOnly:false,MountPath:/var/cache/swift,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:lock,ReadOnly:false,MountPath:/var/lock,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-b2szw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42445,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-storage-0_openstack(91f74906-ec70-4b0c-a657-d075d18f488b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:45:38 crc kubenswrapper[4706]: I1206 05:45:38.858239 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406-config-data" (OuterVolumeSpecName: "config-data") pod "e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406" (UID: "e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:45:38 crc kubenswrapper[4706]: I1206 05:45:38.901847 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz85s\" (UniqueName: \"kubernetes.io/projected/e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406-kube-api-access-lz85s\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:38 crc kubenswrapper[4706]: I1206 05:45:38.901882 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:38 crc kubenswrapper[4706]: I1206 05:45:38.901896 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.059925 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jnhxj" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.206219 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9326561b-a184-41e6-8c1e-4af6c493619d-utilities\") pod \"9326561b-a184-41e6-8c1e-4af6c493619d\" (UID: \"9326561b-a184-41e6-8c1e-4af6c493619d\") " Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.206303 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9326561b-a184-41e6-8c1e-4af6c493619d-catalog-content\") pod \"9326561b-a184-41e6-8c1e-4af6c493619d\" (UID: \"9326561b-a184-41e6-8c1e-4af6c493619d\") " Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.206337 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lj4hh\" (UniqueName: \"kubernetes.io/projected/9326561b-a184-41e6-8c1e-4af6c493619d-kube-api-access-lj4hh\") pod \"9326561b-a184-41e6-8c1e-4af6c493619d\" (UID: \"9326561b-a184-41e6-8c1e-4af6c493619d\") " Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.208774 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9326561b-a184-41e6-8c1e-4af6c493619d-utilities" (OuterVolumeSpecName: "utilities") pod "9326561b-a184-41e6-8c1e-4af6c493619d" (UID: "9326561b-a184-41e6-8c1e-4af6c493619d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.212958 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9326561b-a184-41e6-8c1e-4af6c493619d-kube-api-access-lj4hh" (OuterVolumeSpecName: "kube-api-access-lj4hh") pod "9326561b-a184-41e6-8c1e-4af6c493619d" (UID: "9326561b-a184-41e6-8c1e-4af6c493619d"). InnerVolumeSpecName "kube-api-access-lj4hh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.257370 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9326561b-a184-41e6-8c1e-4af6c493619d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9326561b-a184-41e6-8c1e-4af6c493619d" (UID: "9326561b-a184-41e6-8c1e-4af6c493619d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.308266 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9326561b-a184-41e6-8c1e-4af6c493619d-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.308310 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9326561b-a184-41e6-8c1e-4af6c493619d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.308329 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lj4hh\" (UniqueName: \"kubernetes.io/projected/9326561b-a184-41e6-8c1e-4af6c493619d-kube-api-access-lj4hh\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.673838 4706 generic.go:334] "Generic (PLEG): container finished" podID="9326561b-a184-41e6-8c1e-4af6c493619d" containerID="6547fa233ff66ccdb8c5410f538b30dec20e6f375e9b99277fdfbca355c9190e" exitCode=0 Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.674187 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jnhxj" event={"ID":"9326561b-a184-41e6-8c1e-4af6c493619d","Type":"ContainerDied","Data":"6547fa233ff66ccdb8c5410f538b30dec20e6f375e9b99277fdfbca355c9190e"} Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.674209 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jnhxj" event={"ID":"9326561b-a184-41e6-8c1e-4af6c493619d","Type":"ContainerDied","Data":"54b83b3ae559c9de4b446dea565bd8ce9274db862c39f135fbcb4d46fd419c10"} Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.674225 4706 scope.go:117] "RemoveContainer" containerID="6547fa233ff66ccdb8c5410f538b30dec20e6f375e9b99277fdfbca355c9190e" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.674335 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jnhxj" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.708127 4706 scope.go:117] "RemoveContainer" containerID="7861502f8677e77362ab9e307f747324f496a340af075c8a36c98a31da3e5ca4" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.719210 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jnhxj"] Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.727295 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jnhxj"] Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.742387 4706 scope.go:117] "RemoveContainer" containerID="f459b1cf5ceaab3e193dc35773626fe46fc903e1250adb418b0dd6e6859c5524" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.777552 4706 scope.go:117] "RemoveContainer" containerID="6547fa233ff66ccdb8c5410f538b30dec20e6f375e9b99277fdfbca355c9190e" Dec 06 05:45:39 crc kubenswrapper[4706]: E1206 05:45:39.778088 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6547fa233ff66ccdb8c5410f538b30dec20e6f375e9b99277fdfbca355c9190e\": container with ID starting with 6547fa233ff66ccdb8c5410f538b30dec20e6f375e9b99277fdfbca355c9190e not found: ID does not exist" containerID="6547fa233ff66ccdb8c5410f538b30dec20e6f375e9b99277fdfbca355c9190e" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.778122 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6547fa233ff66ccdb8c5410f538b30dec20e6f375e9b99277fdfbca355c9190e"} err="failed to get container status \"6547fa233ff66ccdb8c5410f538b30dec20e6f375e9b99277fdfbca355c9190e\": rpc error: code = NotFound desc = could not find container \"6547fa233ff66ccdb8c5410f538b30dec20e6f375e9b99277fdfbca355c9190e\": container with ID starting with 6547fa233ff66ccdb8c5410f538b30dec20e6f375e9b99277fdfbca355c9190e not found: ID does not exist" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.778144 4706 scope.go:117] "RemoveContainer" containerID="7861502f8677e77362ab9e307f747324f496a340af075c8a36c98a31da3e5ca4" Dec 06 05:45:39 crc kubenswrapper[4706]: E1206 05:45:39.778462 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7861502f8677e77362ab9e307f747324f496a340af075c8a36c98a31da3e5ca4\": container with ID starting with 7861502f8677e77362ab9e307f747324f496a340af075c8a36c98a31da3e5ca4 not found: ID does not exist" containerID="7861502f8677e77362ab9e307f747324f496a340af075c8a36c98a31da3e5ca4" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.778480 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7861502f8677e77362ab9e307f747324f496a340af075c8a36c98a31da3e5ca4"} err="failed to get container status \"7861502f8677e77362ab9e307f747324f496a340af075c8a36c98a31da3e5ca4\": rpc error: code = NotFound desc = could not find container \"7861502f8677e77362ab9e307f747324f496a340af075c8a36c98a31da3e5ca4\": container with ID starting with 7861502f8677e77362ab9e307f747324f496a340af075c8a36c98a31da3e5ca4 not found: ID does not exist" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.778495 4706 scope.go:117] "RemoveContainer" containerID="f459b1cf5ceaab3e193dc35773626fe46fc903e1250adb418b0dd6e6859c5524" Dec 06 05:45:39 crc kubenswrapper[4706]: E1206 05:45:39.778829 4706 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"f459b1cf5ceaab3e193dc35773626fe46fc903e1250adb418b0dd6e6859c5524\": container with ID starting with f459b1cf5ceaab3e193dc35773626fe46fc903e1250adb418b0dd6e6859c5524 not found: ID does not exist" containerID="f459b1cf5ceaab3e193dc35773626fe46fc903e1250adb418b0dd6e6859c5524" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.778847 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f459b1cf5ceaab3e193dc35773626fe46fc903e1250adb418b0dd6e6859c5524"} err="failed to get container status \"f459b1cf5ceaab3e193dc35773626fe46fc903e1250adb418b0dd6e6859c5524\": rpc error: code = NotFound desc = could not find container \"f459b1cf5ceaab3e193dc35773626fe46fc903e1250adb418b0dd6e6859c5524\": container with ID starting with f459b1cf5ceaab3e193dc35773626fe46fc903e1250adb418b0dd6e6859c5524 not found: ID does not exist" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.900297 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f877ddd87-qfjjs"] Dec 06 05:45:39 crc kubenswrapper[4706]: E1206 05:45:39.900672 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406" containerName="keystone-db-sync" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.900685 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406" containerName="keystone-db-sync" Dec 06 05:45:39 crc kubenswrapper[4706]: E1206 05:45:39.900697 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9326561b-a184-41e6-8c1e-4af6c493619d" containerName="registry-server" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.900703 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="9326561b-a184-41e6-8c1e-4af6c493619d" containerName="registry-server" Dec 06 05:45:39 crc kubenswrapper[4706]: E1206 05:45:39.900725 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9326561b-a184-41e6-8c1e-4af6c493619d" containerName="extract-utilities" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.900732 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="9326561b-a184-41e6-8c1e-4af6c493619d" containerName="extract-utilities" Dec 06 05:45:39 crc kubenswrapper[4706]: E1206 05:45:39.900741 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9326561b-a184-41e6-8c1e-4af6c493619d" containerName="extract-content" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.900747 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="9326561b-a184-41e6-8c1e-4af6c493619d" containerName="extract-content" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.900897 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406" containerName="keystone-db-sync" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.900922 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="9326561b-a184-41e6-8c1e-4af6c493619d" containerName="registry-server" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.901843 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.924116 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f877ddd87-qfjjs"] Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.928719 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-gmgr6"] Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.937086 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.939454 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-jsz48" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.939689 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.940222 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.940345 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.940467 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 06 05:45:39 crc kubenswrapper[4706]: I1206 05:45:39.949119 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-gmgr6"] Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.027014 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-dns-svc\") pod \"dnsmasq-dns-f877ddd87-qfjjs\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.027115 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glgr6\" (UniqueName: \"kubernetes.io/projected/b6b061c1-b58e-4c07-961f-150c13cd6d39-kube-api-access-glgr6\") pod \"keystone-bootstrap-gmgr6\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.027157 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-fernet-keys\") pod \"keystone-bootstrap-gmgr6\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.027184 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-combined-ca-bundle\") pod \"keystone-bootstrap-gmgr6\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.027232 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-config\") pod \"dnsmasq-dns-f877ddd87-qfjjs\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" Dec 06 05:45:40 crc 
kubenswrapper[4706]: I1206 05:45:40.027263 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lkmj\" (UniqueName: \"kubernetes.io/projected/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-kube-api-access-2lkmj\") pod \"dnsmasq-dns-f877ddd87-qfjjs\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.027286 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-credential-keys\") pod \"keystone-bootstrap-gmgr6\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.027331 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-ovsdbserver-sb\") pod \"dnsmasq-dns-f877ddd87-qfjjs\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.027353 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-ovsdbserver-nb\") pod \"dnsmasq-dns-f877ddd87-qfjjs\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.027382 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-config-data\") pod \"keystone-bootstrap-gmgr6\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.027410 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-scripts\") pod \"keystone-bootstrap-gmgr6\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.071486 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9326561b-a184-41e6-8c1e-4af6c493619d" path="/var/lib/kubelet/pods/9326561b-a184-41e6-8c1e-4af6c493619d/volumes" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.100124 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-58b8554b5c-s766w"] Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.101519 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-58b8554b5c-s766w" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.104922 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-9pkl6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.105204 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.105313 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.109026 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.111250 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-csbkx"] Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.112192 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.122225 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.129068 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lkmj\" (UniqueName: \"kubernetes.io/projected/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-kube-api-access-2lkmj\") pod \"dnsmasq-dns-f877ddd87-qfjjs\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.129103 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-credential-keys\") pod \"keystone-bootstrap-gmgr6\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.129142 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-ovsdbserver-sb\") pod \"dnsmasq-dns-f877ddd87-qfjjs\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.129162 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-ovsdbserver-nb\") pod \"dnsmasq-dns-f877ddd87-qfjjs\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.129185 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-config-data\") pod \"keystone-bootstrap-gmgr6\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.129203 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-scripts\") pod \"keystone-bootstrap-gmgr6\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 
05:45:40.129233 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-dns-svc\") pod \"dnsmasq-dns-f877ddd87-qfjjs\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.129269 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glgr6\" (UniqueName: \"kubernetes.io/projected/b6b061c1-b58e-4c07-961f-150c13cd6d39-kube-api-access-glgr6\") pod \"keystone-bootstrap-gmgr6\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.129291 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-fernet-keys\") pod \"keystone-bootstrap-gmgr6\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.129310 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-combined-ca-bundle\") pod \"keystone-bootstrap-gmgr6\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.129336 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-config\") pod \"dnsmasq-dns-f877ddd87-qfjjs\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.130591 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-8rgx9" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.134167 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-credential-keys\") pod \"keystone-bootstrap-gmgr6\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.134635 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.135842 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-ovsdbserver-sb\") pod \"dnsmasq-dns-f877ddd87-qfjjs\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.136910 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-dns-svc\") pod \"dnsmasq-dns-f877ddd87-qfjjs\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.136943 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-ovsdbserver-nb\") pod 
\"dnsmasq-dns-f877ddd87-qfjjs\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.136943 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-config\") pod \"dnsmasq-dns-f877ddd87-qfjjs\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.138823 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-config-data\") pod \"keystone-bootstrap-gmgr6\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.139479 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-scripts\") pod \"keystone-bootstrap-gmgr6\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.139977 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-fernet-keys\") pod \"keystone-bootstrap-gmgr6\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.142012 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-combined-ca-bundle\") pod \"keystone-bootstrap-gmgr6\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.155682 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2lkmj\" (UniqueName: \"kubernetes.io/projected/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-kube-api-access-2lkmj\") pod \"dnsmasq-dns-f877ddd87-qfjjs\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.162716 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glgr6\" (UniqueName: \"kubernetes.io/projected/b6b061c1-b58e-4c07-961f-150c13cd6d39-kube-api-access-glgr6\") pod \"keystone-bootstrap-gmgr6\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.177424 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-58b8554b5c-s766w"] Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.212770 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-csbkx"] Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.231279 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8z98j\" (UniqueName: \"kubernetes.io/projected/7bec3465-219b-4c57-83a9-aed4c78d1483-kube-api-access-8z98j\") pod \"cinder-db-sync-csbkx\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") " pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.231850 4706 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5574c4c-5438-4eca-bf23-03972c42720a-logs\") pod \"horizon-58b8554b5c-s766w\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") " pod="openstack/horizon-58b8554b5c-s766w" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.232008 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b5574c4c-5438-4eca-bf23-03972c42720a-horizon-secret-key\") pod \"horizon-58b8554b5c-s766w\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") " pod="openstack/horizon-58b8554b5c-s766w" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.232231 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-combined-ca-bundle\") pod \"cinder-db-sync-csbkx\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") " pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.232348 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7bec3465-219b-4c57-83a9-aed4c78d1483-etc-machine-id\") pod \"cinder-db-sync-csbkx\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") " pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.232446 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-config-data\") pod \"cinder-db-sync-csbkx\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") " pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.232561 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-scripts\") pod \"cinder-db-sync-csbkx\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") " pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.232739 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwtx8\" (UniqueName: \"kubernetes.io/projected/b5574c4c-5438-4eca-bf23-03972c42720a-kube-api-access-wwtx8\") pod \"horizon-58b8554b5c-s766w\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") " pod="openstack/horizon-58b8554b5c-s766w" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.232873 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b5574c4c-5438-4eca-bf23-03972c42720a-scripts\") pod \"horizon-58b8554b5c-s766w\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") " pod="openstack/horizon-58b8554b5c-s766w" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.232995 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-db-sync-config-data\") pod \"cinder-db-sync-csbkx\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") " pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.233232 4706 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b5574c4c-5438-4eca-bf23-03972c42720a-config-data\") pod \"horizon-58b8554b5c-s766w\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") " pod="openstack/horizon-58b8554b5c-s766w" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.236024 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.242448 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.244901 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.248922 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.249397 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.254521 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.267270 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.299304 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-9vnhs"] Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.300458 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-9vnhs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.308609 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.308878 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.308901 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-6kh5g" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.311840 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-bs4sf"] Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.312876 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-bs4sf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.334280 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.340513 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-nknn7" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.348837 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-db-sync-config-data\") pod \"cinder-db-sync-csbkx\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") " pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.351609 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b5574c4c-5438-4eca-bf23-03972c42720a-config-data\") pod \"horizon-58b8554b5c-s766w\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") " pod="openstack/horizon-58b8554b5c-s766w" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.352097 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8z98j\" (UniqueName: \"kubernetes.io/projected/7bec3465-219b-4c57-83a9-aed4c78d1483-kube-api-access-8z98j\") pod \"cinder-db-sync-csbkx\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") " pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.352373 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5574c4c-5438-4eca-bf23-03972c42720a-logs\") pod \"horizon-58b8554b5c-s766w\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") " pod="openstack/horizon-58b8554b5c-s766w" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.352717 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b78581a5-4314-4209-967f-715fa91ee6a7-run-httpd\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.352744 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b5574c4c-5438-4eca-bf23-03972c42720a-horizon-secret-key\") pod \"horizon-58b8554b5c-s766w\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") " pod="openstack/horizon-58b8554b5c-s766w" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.352883 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-config-data\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.352919 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b78581a5-4314-4209-967f-715fa91ee6a7-log-httpd\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.353195 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-combined-ca-bundle\") pod \"cinder-db-sync-csbkx\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") " pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.354726 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-scripts\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.354768 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7bec3465-219b-4c57-83a9-aed4c78d1483-etc-machine-id\") pod \"cinder-db-sync-csbkx\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") " pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.357026 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-config-data\") pod \"cinder-db-sync-csbkx\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") " pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.357336 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-scripts\") pod \"cinder-db-sync-csbkx\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") " pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.357388 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5sgs\" (UniqueName: \"kubernetes.io/projected/b78581a5-4314-4209-967f-715fa91ee6a7-kube-api-access-j5sgs\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.357628 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.357655 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwtx8\" (UniqueName: \"kubernetes.io/projected/b5574c4c-5438-4eca-bf23-03972c42720a-kube-api-access-wwtx8\") pod \"horizon-58b8554b5c-s766w\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") " pod="openstack/horizon-58b8554b5c-s766w" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.357870 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.359464 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b5574c4c-5438-4eca-bf23-03972c42720a-scripts\") pod \"horizon-58b8554b5c-s766w\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") 
" pod="openstack/horizon-58b8554b5c-s766w" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.375244 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7bec3465-219b-4c57-83a9-aed4c78d1483-etc-machine-id\") pod \"cinder-db-sync-csbkx\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") " pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.386566 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-db-sync-config-data\") pod \"cinder-db-sync-csbkx\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") " pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.388956 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5574c4c-5438-4eca-bf23-03972c42720a-logs\") pod \"horizon-58b8554b5c-s766w\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") " pod="openstack/horizon-58b8554b5c-s766w" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.389448 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b5574c4c-5438-4eca-bf23-03972c42720a-scripts\") pod \"horizon-58b8554b5c-s766w\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") " pod="openstack/horizon-58b8554b5c-s766w" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.390187 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-combined-ca-bundle\") pod \"cinder-db-sync-csbkx\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") " pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.393455 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b5574c4c-5438-4eca-bf23-03972c42720a-horizon-secret-key\") pod \"horizon-58b8554b5c-s766w\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") " pod="openstack/horizon-58b8554b5c-s766w" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.394290 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-config-data\") pod \"cinder-db-sync-csbkx\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") " pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.396967 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b5574c4c-5438-4eca-bf23-03972c42720a-config-data\") pod \"horizon-58b8554b5c-s766w\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") " pod="openstack/horizon-58b8554b5c-s766w" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.396994 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-9vnhs"] Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.397574 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-scripts\") pod \"cinder-db-sync-csbkx\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") " pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.410869 4706 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-mzfvf"] Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.432214 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwtx8\" (UniqueName: \"kubernetes.io/projected/b5574c4c-5438-4eca-bf23-03972c42720a-kube-api-access-wwtx8\") pod \"horizon-58b8554b5c-s766w\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") " pod="openstack/horizon-58b8554b5c-s766w" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.434861 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8z98j\" (UniqueName: \"kubernetes.io/projected/7bec3465-219b-4c57-83a9-aed4c78d1483-kube-api-access-8z98j\") pod \"cinder-db-sync-csbkx\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") " pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.435673 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-mzfvf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.438646 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-wb9tg" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.441314 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-bs4sf"] Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.442430 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.442725 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.453636 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f877ddd87-qfjjs"] Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.466150 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-mzfvf"] Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.477593 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5sgs\" (UniqueName: \"kubernetes.io/projected/b78581a5-4314-4209-967f-715fa91ee6a7-kube-api-access-j5sgs\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.479820 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.480023 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.480776 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03d1d05b-3978-41bd-a7b6-5c0465432409-combined-ca-bundle\") pod \"neutron-db-sync-9vnhs\" (UID: \"03d1d05b-3978-41bd-a7b6-5c0465432409\") " pod="openstack/neutron-db-sync-9vnhs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 
05:45:40.480919 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/03d1d05b-3978-41bd-a7b6-5c0465432409-config\") pod \"neutron-db-sync-9vnhs\" (UID: \"03d1d05b-3978-41bd-a7b6-5c0465432409\") " pod="openstack/neutron-db-sync-9vnhs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.481092 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4p8bf\" (UniqueName: \"kubernetes.io/projected/03d1d05b-3978-41bd-a7b6-5c0465432409-kube-api-access-4p8bf\") pod \"neutron-db-sync-9vnhs\" (UID: \"03d1d05b-3978-41bd-a7b6-5c0465432409\") " pod="openstack/neutron-db-sync-9vnhs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.481242 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-br92b\" (UniqueName: \"kubernetes.io/projected/2a7ff6dd-4101-4650-a9b5-af050055f631-kube-api-access-br92b\") pod \"barbican-db-sync-bs4sf\" (UID: \"2a7ff6dd-4101-4650-a9b5-af050055f631\") " pod="openstack/barbican-db-sync-bs4sf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.481396 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b78581a5-4314-4209-967f-715fa91ee6a7-run-httpd\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.481819 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-config-data\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.481949 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b78581a5-4314-4209-967f-715fa91ee6a7-log-httpd\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.476426 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-csbkx" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.482399 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-scripts\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.482512 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2a7ff6dd-4101-4650-a9b5-af050055f631-db-sync-config-data\") pod \"barbican-db-sync-bs4sf\" (UID: \"2a7ff6dd-4101-4650-a9b5-af050055f631\") " pod="openstack/barbican-db-sync-bs4sf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.482633 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a7ff6dd-4101-4650-a9b5-af050055f631-combined-ca-bundle\") pod \"barbican-db-sync-bs4sf\" (UID: \"2a7ff6dd-4101-4650-a9b5-af050055f631\") " pod="openstack/barbican-db-sync-bs4sf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.482224 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b78581a5-4314-4209-967f-715fa91ee6a7-run-httpd\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.482564 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b78581a5-4314-4209-967f-715fa91ee6a7-log-httpd\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.484233 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.485453 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.485574 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-84f54757c7-wbhlc"] Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.487299 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-84f54757c7-wbhlc" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.487997 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-scripts\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.493231 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-config-data\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.499171 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-68dcc9cf6f-fnr2t"] Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.500988 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.510784 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5sgs\" (UniqueName: \"kubernetes.io/projected/b78581a5-4314-4209-967f-715fa91ee6a7-kube-api-access-j5sgs\") pod \"ceilometer-0\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.542361 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68dcc9cf6f-fnr2t"] Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.556092 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-84f54757c7-wbhlc"] Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.583997 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67dlv\" (UniqueName: \"kubernetes.io/projected/8115e0ca-7198-41ac-bed9-5186d95819c9-kube-api-access-67dlv\") pod \"horizon-84f54757c7-wbhlc\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") " pod="openstack/horizon-84f54757c7-wbhlc" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584043 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-config-data\") pod \"placement-db-sync-mzfvf\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " pod="openstack/placement-db-sync-mzfvf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584077 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8jr6c\" (UniqueName: \"kubernetes.io/projected/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-kube-api-access-8jr6c\") pod \"placement-db-sync-mzfvf\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " pod="openstack/placement-db-sync-mzfvf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584100 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nfnjd\" (UniqueName: \"kubernetes.io/projected/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-kube-api-access-nfnjd\") pod \"dnsmasq-dns-68dcc9cf6f-fnr2t\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584144 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2a7ff6dd-4101-4650-a9b5-af050055f631-db-sync-config-data\") pod \"barbican-db-sync-bs4sf\" (UID: \"2a7ff6dd-4101-4650-a9b5-af050055f631\") " pod="openstack/barbican-db-sync-bs4sf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584175 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8115e0ca-7198-41ac-bed9-5186d95819c9-scripts\") pod \"horizon-84f54757c7-wbhlc\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") " pod="openstack/horizon-84f54757c7-wbhlc" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584194 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a7ff6dd-4101-4650-a9b5-af050055f631-combined-ca-bundle\") pod \"barbican-db-sync-bs4sf\" (UID: \"2a7ff6dd-4101-4650-a9b5-af050055f631\") " pod="openstack/barbican-db-sync-bs4sf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584214 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8115e0ca-7198-41ac-bed9-5186d95819c9-logs\") pod \"horizon-84f54757c7-wbhlc\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") " pod="openstack/horizon-84f54757c7-wbhlc" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584243 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8115e0ca-7198-41ac-bed9-5186d95819c9-horizon-secret-key\") pod \"horizon-84f54757c7-wbhlc\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") " pod="openstack/horizon-84f54757c7-wbhlc" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584268 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03d1d05b-3978-41bd-a7b6-5c0465432409-combined-ca-bundle\") pod \"neutron-db-sync-9vnhs\" (UID: \"03d1d05b-3978-41bd-a7b6-5c0465432409\") " pod="openstack/neutron-db-sync-9vnhs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584288 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-combined-ca-bundle\") pod \"placement-db-sync-mzfvf\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " pod="openstack/placement-db-sync-mzfvf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584307 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/03d1d05b-3978-41bd-a7b6-5c0465432409-config\") pod \"neutron-db-sync-9vnhs\" (UID: \"03d1d05b-3978-41bd-a7b6-5c0465432409\") " pod="openstack/neutron-db-sync-9vnhs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584331 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-ovsdbserver-nb\") pod \"dnsmasq-dns-68dcc9cf6f-fnr2t\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584352 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4p8bf\" (UniqueName: 
\"kubernetes.io/projected/03d1d05b-3978-41bd-a7b6-5c0465432409-kube-api-access-4p8bf\") pod \"neutron-db-sync-9vnhs\" (UID: \"03d1d05b-3978-41bd-a7b6-5c0465432409\") " pod="openstack/neutron-db-sync-9vnhs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584379 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-scripts\") pod \"placement-db-sync-mzfvf\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " pod="openstack/placement-db-sync-mzfvf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584397 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-logs\") pod \"placement-db-sync-mzfvf\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " pod="openstack/placement-db-sync-mzfvf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584415 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-dns-svc\") pod \"dnsmasq-dns-68dcc9cf6f-fnr2t\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584432 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-br92b\" (UniqueName: \"kubernetes.io/projected/2a7ff6dd-4101-4650-a9b5-af050055f631-kube-api-access-br92b\") pod \"barbican-db-sync-bs4sf\" (UID: \"2a7ff6dd-4101-4650-a9b5-af050055f631\") " pod="openstack/barbican-db-sync-bs4sf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584449 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-config\") pod \"dnsmasq-dns-68dcc9cf6f-fnr2t\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584466 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-ovsdbserver-sb\") pod \"dnsmasq-dns-68dcc9cf6f-fnr2t\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.584508 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8115e0ca-7198-41ac-bed9-5186d95819c9-config-data\") pod \"horizon-84f54757c7-wbhlc\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") " pod="openstack/horizon-84f54757c7-wbhlc" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.588767 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03d1d05b-3978-41bd-a7b6-5c0465432409-combined-ca-bundle\") pod \"neutron-db-sync-9vnhs\" (UID: \"03d1d05b-3978-41bd-a7b6-5c0465432409\") " pod="openstack/neutron-db-sync-9vnhs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.589140 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/03d1d05b-3978-41bd-a7b6-5c0465432409-config\") pod 
\"neutron-db-sync-9vnhs\" (UID: \"03d1d05b-3978-41bd-a7b6-5c0465432409\") " pod="openstack/neutron-db-sync-9vnhs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.591825 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2a7ff6dd-4101-4650-a9b5-af050055f631-db-sync-config-data\") pod \"barbican-db-sync-bs4sf\" (UID: \"2a7ff6dd-4101-4650-a9b5-af050055f631\") " pod="openstack/barbican-db-sync-bs4sf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.593605 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a7ff6dd-4101-4650-a9b5-af050055f631-combined-ca-bundle\") pod \"barbican-db-sync-bs4sf\" (UID: \"2a7ff6dd-4101-4650-a9b5-af050055f631\") " pod="openstack/barbican-db-sync-bs4sf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.605252 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.617180 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-br92b\" (UniqueName: \"kubernetes.io/projected/2a7ff6dd-4101-4650-a9b5-af050055f631-kube-api-access-br92b\") pod \"barbican-db-sync-bs4sf\" (UID: \"2a7ff6dd-4101-4650-a9b5-af050055f631\") " pod="openstack/barbican-db-sync-bs4sf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.619215 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4p8bf\" (UniqueName: \"kubernetes.io/projected/03d1d05b-3978-41bd-a7b6-5c0465432409-kube-api-access-4p8bf\") pod \"neutron-db-sync-9vnhs\" (UID: \"03d1d05b-3978-41bd-a7b6-5c0465432409\") " pod="openstack/neutron-db-sync-9vnhs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.649598 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-9vnhs" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.696366 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8115e0ca-7198-41ac-bed9-5186d95819c9-scripts\") pod \"horizon-84f54757c7-wbhlc\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") " pod="openstack/horizon-84f54757c7-wbhlc" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.696411 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8115e0ca-7198-41ac-bed9-5186d95819c9-logs\") pod \"horizon-84f54757c7-wbhlc\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") " pod="openstack/horizon-84f54757c7-wbhlc" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.696439 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8115e0ca-7198-41ac-bed9-5186d95819c9-horizon-secret-key\") pod \"horizon-84f54757c7-wbhlc\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") " pod="openstack/horizon-84f54757c7-wbhlc" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.696473 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-combined-ca-bundle\") pod \"placement-db-sync-mzfvf\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " pod="openstack/placement-db-sync-mzfvf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.696503 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-ovsdbserver-nb\") pod \"dnsmasq-dns-68dcc9cf6f-fnr2t\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.696530 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-scripts\") pod \"placement-db-sync-mzfvf\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " pod="openstack/placement-db-sync-mzfvf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.696552 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-logs\") pod \"placement-db-sync-mzfvf\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " pod="openstack/placement-db-sync-mzfvf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.696569 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-dns-svc\") pod \"dnsmasq-dns-68dcc9cf6f-fnr2t\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.696588 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-config\") pod \"dnsmasq-dns-68dcc9cf6f-fnr2t\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.696607 4706 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-ovsdbserver-sb\") pod \"dnsmasq-dns-68dcc9cf6f-fnr2t\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.696637 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8115e0ca-7198-41ac-bed9-5186d95819c9-config-data\") pod \"horizon-84f54757c7-wbhlc\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") " pod="openstack/horizon-84f54757c7-wbhlc" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.696661 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67dlv\" (UniqueName: \"kubernetes.io/projected/8115e0ca-7198-41ac-bed9-5186d95819c9-kube-api-access-67dlv\") pod \"horizon-84f54757c7-wbhlc\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") " pod="openstack/horizon-84f54757c7-wbhlc" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.696677 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-config-data\") pod \"placement-db-sync-mzfvf\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " pod="openstack/placement-db-sync-mzfvf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.696696 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8jr6c\" (UniqueName: \"kubernetes.io/projected/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-kube-api-access-8jr6c\") pod \"placement-db-sync-mzfvf\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " pod="openstack/placement-db-sync-mzfvf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.696718 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nfnjd\" (UniqueName: \"kubernetes.io/projected/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-kube-api-access-nfnjd\") pod \"dnsmasq-dns-68dcc9cf6f-fnr2t\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.697670 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-ovsdbserver-sb\") pod \"dnsmasq-dns-68dcc9cf6f-fnr2t\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.697960 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-logs\") pod \"placement-db-sync-mzfvf\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " pod="openstack/placement-db-sync-mzfvf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.698559 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-dns-svc\") pod \"dnsmasq-dns-68dcc9cf6f-fnr2t\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.698841 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8115e0ca-7198-41ac-bed9-5186d95819c9-config-data\") pod 
\"horizon-84f54757c7-wbhlc\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") " pod="openstack/horizon-84f54757c7-wbhlc" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.698876 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-config\") pod \"dnsmasq-dns-68dcc9cf6f-fnr2t\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.699233 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8115e0ca-7198-41ac-bed9-5186d95819c9-logs\") pod \"horizon-84f54757c7-wbhlc\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") " pod="openstack/horizon-84f54757c7-wbhlc" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.700771 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-ovsdbserver-nb\") pod \"dnsmasq-dns-68dcc9cf6f-fnr2t\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.703390 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-bs4sf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.704426 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8115e0ca-7198-41ac-bed9-5186d95819c9-scripts\") pod \"horizon-84f54757c7-wbhlc\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") " pod="openstack/horizon-84f54757c7-wbhlc" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.705944 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-combined-ca-bundle\") pod \"placement-db-sync-mzfvf\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " pod="openstack/placement-db-sync-mzfvf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.711743 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-config-data\") pod \"placement-db-sync-mzfvf\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " pod="openstack/placement-db-sync-mzfvf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.722037 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-scripts\") pod \"placement-db-sync-mzfvf\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " pod="openstack/placement-db-sync-mzfvf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.725280 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nfnjd\" (UniqueName: \"kubernetes.io/projected/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-kube-api-access-nfnjd\") pod \"dnsmasq-dns-68dcc9cf6f-fnr2t\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") " pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.729459 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-58b8554b5c-s766w" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.735653 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8jr6c\" (UniqueName: \"kubernetes.io/projected/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-kube-api-access-8jr6c\") pod \"placement-db-sync-mzfvf\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " pod="openstack/placement-db-sync-mzfvf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.738615 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8115e0ca-7198-41ac-bed9-5186d95819c9-horizon-secret-key\") pod \"horizon-84f54757c7-wbhlc\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") " pod="openstack/horizon-84f54757c7-wbhlc" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.750596 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67dlv\" (UniqueName: \"kubernetes.io/projected/8115e0ca-7198-41ac-bed9-5186d95819c9-kube-api-access-67dlv\") pod \"horizon-84f54757c7-wbhlc\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") " pod="openstack/horizon-84f54757c7-wbhlc" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.763878 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-mzfvf" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.764414 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-84f54757c7-wbhlc" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.821975 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.853114 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"91f74906-ec70-4b0c-a657-d075d18f488b","Type":"ContainerStarted","Data":"006a94ebb8f764fb00acf86db6961ebcab434b135c749f07f9b996625e974c47"} Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.853894 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f877ddd87-qfjjs"] Dec 06 05:45:40 crc kubenswrapper[4706]: W1206 05:45:40.903728 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb8f2678c_037b_4ff3_8fdb_a9fcd0a42ba1.slice/crio-9e9e59d7f23fb8cfe1200619c00d8c2b7a1dfb35b400d8afbe5c83ddb1918e73 WatchSource:0}: Error finding container 9e9e59d7f23fb8cfe1200619c00d8c2b7a1dfb35b400d8afbe5c83ddb1918e73: Status 404 returned error can't find the container with id 9e9e59d7f23fb8cfe1200619c00d8c2b7a1dfb35b400d8afbe5c83ddb1918e73 Dec 06 05:45:40 crc kubenswrapper[4706]: I1206 05:45:40.912494 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-gmgr6"] Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.205893 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-csbkx"] Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.339092 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:45:41 crc kubenswrapper[4706]: W1206 05:45:41.346447 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb78581a5_4314_4209_967f_715fa91ee6a7.slice/crio-c4b2c8919f7ec895a03cabd990fdd4b8e679c7a4dd8ad2ef8bbc3003a0e74d6d 
WatchSource:0}: Error finding container c4b2c8919f7ec895a03cabd990fdd4b8e679c7a4dd8ad2ef8bbc3003a0e74d6d: Status 404 returned error can't find the container with id c4b2c8919f7ec895a03cabd990fdd4b8e679c7a4dd8ad2ef8bbc3003a0e74d6d Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.577240 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-9vnhs"] Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.594721 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-84f54757c7-wbhlc"] Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.608835 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-mzfvf"] Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.619664 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-bs4sf"] Dec 06 05:45:41 crc kubenswrapper[4706]: W1206 05:45:41.622808 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4f0ced3b_4b02_4ce1_935a_af7cc2e01346.slice/crio-728777caa0e2511d8486dd4026f8941930fcf4e4dab320a52969434e39da32ab WatchSource:0}: Error finding container 728777caa0e2511d8486dd4026f8941930fcf4e4dab320a52969434e39da32ab: Status 404 returned error can't find the container with id 728777caa0e2511d8486dd4026f8941930fcf4e4dab320a52969434e39da32ab Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.763991 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-68dcc9cf6f-fnr2t"] Dec 06 05:45:41 crc kubenswrapper[4706]: W1206 05:45:41.766876 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf77dd76e_3bec_475c_aefd_7d8a7d9fef84.slice/crio-0c43d645cdc4579b3decf790ccfd189a4d9ba8f9e05b595855904b16d80e2224 WatchSource:0}: Error finding container 0c43d645cdc4579b3decf790ccfd189a4d9ba8f9e05b595855904b16d80e2224: Status 404 returned error can't find the container with id 0c43d645cdc4579b3decf790ccfd189a4d9ba8f9e05b595855904b16d80e2224 Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.777412 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-58b8554b5c-s766w"] Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.876745 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-mzfvf" event={"ID":"4f0ced3b-4b02-4ce1-935a-af7cc2e01346","Type":"ContainerStarted","Data":"728777caa0e2511d8486dd4026f8941930fcf4e4dab320a52969434e39da32ab"} Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.879565 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-9vnhs" event={"ID":"03d1d05b-3978-41bd-a7b6-5c0465432409","Type":"ContainerStarted","Data":"c8db130af8dc5b46ef0cb12d6b97c685b0e4d7b8f2a8d3520e843fda5e2b6733"} Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.880878 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-58b8554b5c-s766w" event={"ID":"b5574c4c-5438-4eca-bf23-03972c42720a","Type":"ContainerStarted","Data":"875c70a0e43b5f3d685b351b3a4f4ab6b8ba54b881f25eb44d55e8908339f40a"} Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.881965 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gmgr6" event={"ID":"b6b061c1-b58e-4c07-961f-150c13cd6d39","Type":"ContainerStarted","Data":"0d5f10632e82e97d62d20ae61fde4c2d46e96e255197bc1c392ef5e3a6650508"} Dec 06 05:45:41 crc kubenswrapper[4706]: 
I1206 05:45:41.881989 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gmgr6" event={"ID":"b6b061c1-b58e-4c07-961f-150c13cd6d39","Type":"ContainerStarted","Data":"7605f8e439c382d83dde4749bc8c90c16c4c16e232e647f3d08836c7a39c2928"} Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.885176 4706 generic.go:334] "Generic (PLEG): container finished" podID="b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1" containerID="eb75d867c069b8e5f73e0de5740c621c526bfb0a56633d3c67b2751a9efdddf7" exitCode=0 Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.885243 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" event={"ID":"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1","Type":"ContainerDied","Data":"eb75d867c069b8e5f73e0de5740c621c526bfb0a56633d3c67b2751a9efdddf7"} Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.885296 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" event={"ID":"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1","Type":"ContainerStarted","Data":"9e9e59d7f23fb8cfe1200619c00d8c2b7a1dfb35b400d8afbe5c83ddb1918e73"} Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.886805 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84f54757c7-wbhlc" event={"ID":"8115e0ca-7198-41ac-bed9-5186d95819c9","Type":"ContainerStarted","Data":"1ff83f11e54a05e27d05137858faf59adfc08dd66090ab9cd45c6498c50351d7"} Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.888183 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-csbkx" event={"ID":"7bec3465-219b-4c57-83a9-aed4c78d1483","Type":"ContainerStarted","Data":"ba7f91f7348165896fcb59713b39f97a573a22dc1b6f9d23f19257731a925fc2"} Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.889223 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b78581a5-4314-4209-967f-715fa91ee6a7","Type":"ContainerStarted","Data":"c4b2c8919f7ec895a03cabd990fdd4b8e679c7a4dd8ad2ef8bbc3003a0e74d6d"} Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.904154 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-bs4sf" event={"ID":"2a7ff6dd-4101-4650-a9b5-af050055f631","Type":"ContainerStarted","Data":"93ffc157166b6aaf28c61dc6c82844d0321f12ef16a0d61aa7d6b891ce3b6038"} Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.913570 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-gmgr6" podStartSLOduration=2.9135468920000003 podStartE2EDuration="2.913546892s" podCreationTimestamp="2025-12-06 05:45:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:45:41.898530406 +0000 UTC m=+1564.226354350" watchObservedRunningTime="2025-12-06 05:45:41.913546892 +0000 UTC m=+1564.241370836" Dec 06 05:45:41 crc kubenswrapper[4706]: I1206 05:45:41.915558 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" event={"ID":"f77dd76e-3bec-475c-aefd-7d8a7d9fef84","Type":"ContainerStarted","Data":"0c43d645cdc4579b3decf790ccfd189a4d9ba8f9e05b595855904b16d80e2224"} Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.013188 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.101225 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/horizon-58b8554b5c-s766w"] Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.108991 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-594b46c997-9b6gw"] Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.120921 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-594b46c997-9b6gw" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.151775 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-594b46c997-9b6gw"] Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.224925 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.240384 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/18221185-8d46-4fa8-8a4c-5fdfba2ef814-horizon-secret-key\") pod \"horizon-594b46c997-9b6gw\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " pod="openstack/horizon-594b46c997-9b6gw" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.240453 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tbwj6\" (UniqueName: \"kubernetes.io/projected/18221185-8d46-4fa8-8a4c-5fdfba2ef814-kube-api-access-tbwj6\") pod \"horizon-594b46c997-9b6gw\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " pod="openstack/horizon-594b46c997-9b6gw" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.240497 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/18221185-8d46-4fa8-8a4c-5fdfba2ef814-config-data\") pod \"horizon-594b46c997-9b6gw\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " pod="openstack/horizon-594b46c997-9b6gw" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.240558 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18221185-8d46-4fa8-8a4c-5fdfba2ef814-logs\") pod \"horizon-594b46c997-9b6gw\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " pod="openstack/horizon-594b46c997-9b6gw" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.240587 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/18221185-8d46-4fa8-8a4c-5fdfba2ef814-scripts\") pod \"horizon-594b46c997-9b6gw\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " pod="openstack/horizon-594b46c997-9b6gw" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.341891 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2lkmj\" (UniqueName: \"kubernetes.io/projected/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-kube-api-access-2lkmj\") pod \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.342229 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-ovsdbserver-sb\") pod \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.342397 4706 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-config\") pod \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.342523 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-dns-svc\") pod \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.342746 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-ovsdbserver-nb\") pod \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\" (UID: \"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1\") " Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.343018 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tbwj6\" (UniqueName: \"kubernetes.io/projected/18221185-8d46-4fa8-8a4c-5fdfba2ef814-kube-api-access-tbwj6\") pod \"horizon-594b46c997-9b6gw\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " pod="openstack/horizon-594b46c997-9b6gw" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.343143 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/18221185-8d46-4fa8-8a4c-5fdfba2ef814-config-data\") pod \"horizon-594b46c997-9b6gw\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " pod="openstack/horizon-594b46c997-9b6gw" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.343303 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18221185-8d46-4fa8-8a4c-5fdfba2ef814-logs\") pod \"horizon-594b46c997-9b6gw\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " pod="openstack/horizon-594b46c997-9b6gw" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.343418 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/18221185-8d46-4fa8-8a4c-5fdfba2ef814-scripts\") pod \"horizon-594b46c997-9b6gw\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " pod="openstack/horizon-594b46c997-9b6gw" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.343618 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/18221185-8d46-4fa8-8a4c-5fdfba2ef814-horizon-secret-key\") pod \"horizon-594b46c997-9b6gw\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " pod="openstack/horizon-594b46c997-9b6gw" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.343697 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18221185-8d46-4fa8-8a4c-5fdfba2ef814-logs\") pod \"horizon-594b46c997-9b6gw\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " pod="openstack/horizon-594b46c997-9b6gw" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.344188 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/18221185-8d46-4fa8-8a4c-5fdfba2ef814-scripts\") pod \"horizon-594b46c997-9b6gw\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " pod="openstack/horizon-594b46c997-9b6gw" Dec 06 05:45:42 crc 
kubenswrapper[4706]: I1206 05:45:42.345054 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/18221185-8d46-4fa8-8a4c-5fdfba2ef814-config-data\") pod \"horizon-594b46c997-9b6gw\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " pod="openstack/horizon-594b46c997-9b6gw" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.351306 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-kube-api-access-2lkmj" (OuterVolumeSpecName: "kube-api-access-2lkmj") pod "b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1" (UID: "b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1"). InnerVolumeSpecName "kube-api-access-2lkmj". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.351732 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/18221185-8d46-4fa8-8a4c-5fdfba2ef814-horizon-secret-key\") pod \"horizon-594b46c997-9b6gw\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " pod="openstack/horizon-594b46c997-9b6gw" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.361241 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1" (UID: "b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.366299 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tbwj6\" (UniqueName: \"kubernetes.io/projected/18221185-8d46-4fa8-8a4c-5fdfba2ef814-kube-api-access-tbwj6\") pod \"horizon-594b46c997-9b6gw\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " pod="openstack/horizon-594b46c997-9b6gw" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.366577 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-config" (OuterVolumeSpecName: "config") pod "b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1" (UID: "b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.372240 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1" (UID: "b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.372743 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1" (UID: "b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.445149 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.445188 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2lkmj\" (UniqueName: \"kubernetes.io/projected/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-kube-api-access-2lkmj\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.445199 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.445211 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.445221 4706 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.454566 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-594b46c997-9b6gw" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.928792 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.929012 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f877ddd87-qfjjs" event={"ID":"b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1","Type":"ContainerDied","Data":"9e9e59d7f23fb8cfe1200619c00d8c2b7a1dfb35b400d8afbe5c83ddb1918e73"} Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.929273 4706 scope.go:117] "RemoveContainer" containerID="eb75d867c069b8e5f73e0de5740c621c526bfb0a56633d3c67b2751a9efdddf7" Dec 06 05:45:42 crc kubenswrapper[4706]: I1206 05:45:42.964726 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-594b46c997-9b6gw"] Dec 06 05:45:43 crc kubenswrapper[4706]: I1206 05:45:43.059894 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f877ddd87-qfjjs"] Dec 06 05:45:43 crc kubenswrapper[4706]: I1206 05:45:43.069597 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f877ddd87-qfjjs"] Dec 06 05:45:43 crc kubenswrapper[4706]: I1206 05:45:43.937111 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-594b46c997-9b6gw" event={"ID":"18221185-8d46-4fa8-8a4c-5fdfba2ef814","Type":"ContainerStarted","Data":"66c9d22a4ba6156b5383b6a64344ae27eace99b5a7a9ba604925ece99838ed19"} Dec 06 05:45:44 crc kubenswrapper[4706]: I1206 05:45:44.046901 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1" path="/var/lib/kubelet/pods/b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1/volumes" Dec 06 05:45:45 crc kubenswrapper[4706]: I1206 05:45:45.991268 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-9vnhs" 
event={"ID":"03d1d05b-3978-41bd-a7b6-5c0465432409","Type":"ContainerStarted","Data":"8ab97a3e9911e22c9eec4678e401db9aeeef45fe97e735d2361d29ec47239633"} Dec 06 05:45:46 crc kubenswrapper[4706]: I1206 05:45:46.000997 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"91f74906-ec70-4b0c-a657-d075d18f488b","Type":"ContainerStarted","Data":"b0769c18e2105e12c931c3c70131b4c4c014ae80f5f82e104795b0d7ca5b7087"} Dec 06 05:45:46 crc kubenswrapper[4706]: I1206 05:45:46.001096 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"91f74906-ec70-4b0c-a657-d075d18f488b","Type":"ContainerStarted","Data":"9c1f89d50235d3f2bd86ab1023bebacd2f18824a637d01bef4b33cb5d3a78b71"} Dec 06 05:45:46 crc kubenswrapper[4706]: I1206 05:45:46.001113 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"91f74906-ec70-4b0c-a657-d075d18f488b","Type":"ContainerStarted","Data":"b1e6b36031d75afcefcedf3b71922400cc130b5badaf6d5b19fb3d0b2443747a"} Dec 06 05:45:46 crc kubenswrapper[4706]: I1206 05:45:46.004164 4706 generic.go:334] "Generic (PLEG): container finished" podID="f77dd76e-3bec-475c-aefd-7d8a7d9fef84" containerID="b4a0103806729f574d80bb1ea545de98c1342568829f38b075b4f9e596c5a57a" exitCode=0 Dec 06 05:45:46 crc kubenswrapper[4706]: I1206 05:45:46.004197 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" event={"ID":"f77dd76e-3bec-475c-aefd-7d8a7d9fef84","Type":"ContainerDied","Data":"b4a0103806729f574d80bb1ea545de98c1342568829f38b075b4f9e596c5a57a"} Dec 06 05:45:46 crc kubenswrapper[4706]: I1206 05:45:46.011194 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-9vnhs" podStartSLOduration=6.011173284 podStartE2EDuration="6.011173284s" podCreationTimestamp="2025-12-06 05:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:45:46.008301106 +0000 UTC m=+1568.336125080" watchObservedRunningTime="2025-12-06 05:45:46.011173284 +0000 UTC m=+1568.338997228" Dec 06 05:45:47 crc kubenswrapper[4706]: I1206 05:45:47.027183 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"91f74906-ec70-4b0c-a657-d075d18f488b","Type":"ContainerStarted","Data":"471f9f2e05f7ea6f7c38d24580f5cfe93ba5fab7d2cf23db8decc09942807c0b"} Dec 06 05:45:47 crc kubenswrapper[4706]: I1206 05:45:47.027548 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"91f74906-ec70-4b0c-a657-d075d18f488b","Type":"ContainerStarted","Data":"e0dce2cc02424cbee5257307f9d3fd9c25209c3ccda2f67c9c312faa8938170c"} Dec 06 05:45:47 crc kubenswrapper[4706]: I1206 05:45:47.030251 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" event={"ID":"f77dd76e-3bec-475c-aefd-7d8a7d9fef84","Type":"ContainerStarted","Data":"eec80dea3b09176ad51c414c7e11ae845f2ef6d04b169daf5d219ddb9fd1d63b"} Dec 06 05:45:47 crc kubenswrapper[4706]: I1206 05:45:47.063563 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" podStartSLOduration=7.063546024 podStartE2EDuration="7.063546024s" podCreationTimestamp="2025-12-06 05:45:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 
05:45:47.053109021 +0000 UTC m=+1569.380932985" watchObservedRunningTime="2025-12-06 05:45:47.063546024 +0000 UTC m=+1569.391369968" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.644536 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.644798 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-84f54757c7-wbhlc"] Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.644814 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7f979b84f6-hzq85"] Dec 06 05:45:48 crc kubenswrapper[4706]: E1206 05:45:48.645091 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1" containerName="init" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.645102 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1" containerName="init" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.645617 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8f2678c-037b-4ff3-8fdb-a9fcd0a42ba1" containerName="init" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.646489 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7f979b84f6-hzq85"] Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.646579 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.648696 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.667957 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-594b46c997-9b6gw"] Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.710792 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-8f474c4b8-xgvj4"] Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.712170 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.726923 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-8f474c4b8-xgvj4"] Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.780340 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-config-data\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.780392 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-combined-ca-bundle\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.780437 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-horizon-tls-certs\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.780489 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-horizon-secret-key\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.780517 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-scripts\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.780680 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxl84\" (UniqueName: \"kubernetes.io/projected/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-kube-api-access-fxl84\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.780743 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-logs\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.882620 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-config-data\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.882676 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-horizon-secret-key\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.882806 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-logs\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.882828 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-combined-ca-bundle\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.882896 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-scripts\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.882972 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-horizon-secret-key\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.883030 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tz89l\" (UniqueName: \"kubernetes.io/projected/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-kube-api-access-tz89l\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.883078 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-scripts\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.883168 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxl84\" (UniqueName: \"kubernetes.io/projected/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-kube-api-access-fxl84\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.883195 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-logs\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.883334 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-config-data\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.883393 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-combined-ca-bundle\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.883430 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-horizon-tls-certs\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.883504 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-horizon-tls-certs\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.884640 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-logs\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.885560 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-scripts\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.891324 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-config-data\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.893530 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-horizon-secret-key\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.893807 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-horizon-tls-certs\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.902828 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxl84\" (UniqueName: \"kubernetes.io/projected/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-kube-api-access-fxl84\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " 
pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.906237 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-combined-ca-bundle\") pod \"horizon-7f979b84f6-hzq85\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.984762 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.985538 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-config-data\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.985602 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-horizon-secret-key\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.985634 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-logs\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.985650 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-combined-ca-bundle\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.985679 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-scripts\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.985704 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tz89l\" (UniqueName: \"kubernetes.io/projected/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-kube-api-access-tz89l\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.985825 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-horizon-tls-certs\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.986148 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-logs\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " 
pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.986789 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-scripts\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.987355 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-config-data\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.989728 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-horizon-secret-key\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.990646 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-horizon-tls-certs\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:48 crc kubenswrapper[4706]: I1206 05:45:48.997991 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-combined-ca-bundle\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:49 crc kubenswrapper[4706]: I1206 05:45:49.004739 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tz89l\" (UniqueName: \"kubernetes.io/projected/8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f-kube-api-access-tz89l\") pod \"horizon-8f474c4b8-xgvj4\" (UID: \"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f\") " pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:49 crc kubenswrapper[4706]: I1206 05:45:49.047396 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:45:49 crc kubenswrapper[4706]: I1206 05:45:49.062611 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"91f74906-ec70-4b0c-a657-d075d18f488b","Type":"ContainerStarted","Data":"2e5e1e47856627a0dac030209c1afc21643b359a59cceb9a7db7d4d69b2943f0"} Dec 06 05:45:49 crc kubenswrapper[4706]: E1206 05:45:49.190285 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"container-server\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"container-replicator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-auditor\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-updater\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\"]" pod="openstack/swift-storage-0" podUID="91f74906-ec70-4b0c-a657-d075d18f488b" Dec 06 05:45:56 crc kubenswrapper[4706]: I1206 05:45:49.472029 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7f979b84f6-hzq85"] Dec 06 05:45:56 crc kubenswrapper[4706]: I1206 05:45:55.824278 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" Dec 06 05:45:56 crc kubenswrapper[4706]: I1206 05:45:55.915773 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-8p245"] Dec 06 05:45:56 crc kubenswrapper[4706]: I1206 05:45:55.916008 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-8p245" podUID="1a54d701-bfc3-4f6e-acc3-b64b50e91d30" containerName="dnsmasq-dns" containerID="cri-o://4a5d58f6418f0596d2f8097adc7df29b2a513be60819a058e4cd4d12db366016" gracePeriod=10 Dec 06 05:45:56 crc kubenswrapper[4706]: I1206 05:45:56.043002 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-8p245" podUID="1a54d701-bfc3-4f6e-acc3-b64b50e91d30" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.108:5353: connect: connection refused" Dec 06 05:45:56 crc kubenswrapper[4706]: E1206 05:45:56.696808 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Dec 06 05:45:56 crc kubenswrapper[4706]: E1206 05:45:56.697196 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8jr6c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-mzfvf_openstack(4f0ced3b-4b02-4ce1-935a-af7cc2e01346): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:45:56 crc kubenswrapper[4706]: E1206 05:45:56.698277 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-mzfvf" podUID="4f0ced3b-4b02-4ce1-935a-af7cc2e01346" Dec 06 05:45:57 crc kubenswrapper[4706]: I1206 05:45:57.031743 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-8f474c4b8-xgvj4"] Dec 06 05:45:57 crc kubenswrapper[4706]: I1206 05:45:57.146579 4706 generic.go:334] "Generic (PLEG): container finished" podID="1a54d701-bfc3-4f6e-acc3-b64b50e91d30" containerID="4a5d58f6418f0596d2f8097adc7df29b2a513be60819a058e4cd4d12db366016" exitCode=0 Dec 06 05:45:57 crc kubenswrapper[4706]: I1206 05:45:57.146741 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-8p245" event={"ID":"1a54d701-bfc3-4f6e-acc3-b64b50e91d30","Type":"ContainerDied","Data":"4a5d58f6418f0596d2f8097adc7df29b2a513be60819a058e4cd4d12db366016"} Dec 06 05:45:57 crc kubenswrapper[4706]: E1206 05:45:57.148773 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" 
pod="openstack/placement-db-sync-mzfvf" podUID="4f0ced3b-4b02-4ce1-935a-af7cc2e01346" Dec 06 05:45:59 crc kubenswrapper[4706]: I1206 05:45:59.168098 4706 generic.go:334] "Generic (PLEG): container finished" podID="b6b061c1-b58e-4c07-961f-150c13cd6d39" containerID="0d5f10632e82e97d62d20ae61fde4c2d46e96e255197bc1c392ef5e3a6650508" exitCode=0 Dec 06 05:45:59 crc kubenswrapper[4706]: I1206 05:45:59.168179 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-gmgr6" event={"ID":"b6b061c1-b58e-4c07-961f-150c13cd6d39","Type":"ContainerDied","Data":"0d5f10632e82e97d62d20ae61fde4c2d46e96e255197bc1c392ef5e3a6650508"} Dec 06 05:46:06 crc kubenswrapper[4706]: I1206 05:46:06.042984 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-8p245" podUID="1a54d701-bfc3-4f6e-acc3-b64b50e91d30" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.108:5353: i/o timeout" Dec 06 05:46:11 crc kubenswrapper[4706]: I1206 05:46:11.044136 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-8p245" podUID="1a54d701-bfc3-4f6e-acc3-b64b50e91d30" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.108:5353: i/o timeout" Dec 06 05:46:11 crc kubenswrapper[4706]: I1206 05:46:11.044852 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-8p245" Dec 06 05:46:11 crc kubenswrapper[4706]: I1206 05:46:11.270805 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f979b84f6-hzq85" event={"ID":"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf","Type":"ContainerStarted","Data":"2a4fd40e570cd26faddc1bca7446994984d187e221497c601ad93e2dca508d92"} Dec 06 05:46:11 crc kubenswrapper[4706]: E1206 05:46:11.829287 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Dec 06 05:46:11 crc kubenswrapper[4706]: E1206 05:46:11.829499 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ncdh59chcch55dh599hcch54dhb8h698hf9h56fh658h558h588hb7h65ch84h6bh89h5b4hbbh69h659h578h8dh666h55ch5cfh8chb4hbchf4q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tbwj6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-594b46c997-9b6gw_openstack(18221185-8d46-4fa8-8a4c-5fdfba2ef814): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:46:11 crc kubenswrapper[4706]: E1206 05:46:11.831739 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-594b46c997-9b6gw" podUID="18221185-8d46-4fa8-8a4c-5fdfba2ef814" Dec 06 05:46:11 crc kubenswrapper[4706]: E1206 05:46:11.912362 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Dec 06 05:46:11 crc kubenswrapper[4706]: E1206 05:46:11.912558 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n669h8h677h657hd7h595h56ch54h98h84h68fh55fhbbh5f7h5b4h5ddh666h5fch694h554h676h65bh5cdh79h59ch9fhd4h9bh694h59bh5d7h5bfq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wwtx8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-58b8554b5c-s766w_openstack(b5574c4c-5438-4eca-bf23-03972c42720a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:46:11 crc kubenswrapper[4706]: E1206 05:46:11.914554 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-58b8554b5c-s766w" podUID="b5574c4c-5438-4eca-bf23-03972c42720a" Dec 06 05:46:12 crc kubenswrapper[4706]: E1206 05:46:12.025600 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Dec 06 05:46:12 crc kubenswrapper[4706]: E1206 05:46:12.025749 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nf4h9h547h666h68h5c9h598h686h54bh6fh7bh54h95h65fhc4h99h54dh54bh67fhdbh657h648h58fh555h698h556h559hfh65ch75h85h675q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-67dlv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-84f54757c7-wbhlc_openstack(8115e0ca-7198-41ac-bed9-5186d95819c9): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:46:12 crc kubenswrapper[4706]: E1206 05:46:12.028325 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-84f54757c7-wbhlc" podUID="8115e0ca-7198-41ac-bed9-5186d95819c9" Dec 06 05:46:15 crc kubenswrapper[4706]: W1206 05:46:15.387923 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8fc4f0d3_7ebd_42d4_b0f3_cb0b5974fd0f.slice/crio-986f3fb70013c4ffa0b04b4d013887727671fff00e793994ba7cbb66cbbbc4e2 WatchSource:0}: Error finding container 986f3fb70013c4ffa0b04b4d013887727671fff00e793994ba7cbb66cbbbc4e2: Status 404 returned error can't find the container with id 986f3fb70013c4ffa0b04b4d013887727671fff00e793994ba7cbb66cbbbc4e2 Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.477193 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-8p245" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.584574 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-dns-svc\") pod \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\" (UID: \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\") " Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.584738 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-ovsdbserver-nb\") pod \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\" (UID: \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\") " Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.584780 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-config\") pod \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\" (UID: \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\") " Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.584833 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4kbvh\" (UniqueName: \"kubernetes.io/projected/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-kube-api-access-4kbvh\") pod \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\" (UID: \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\") " Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.584886 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-ovsdbserver-sb\") pod \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\" (UID: \"1a54d701-bfc3-4f6e-acc3-b64b50e91d30\") " Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.604091 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-kube-api-access-4kbvh" (OuterVolumeSpecName: "kube-api-access-4kbvh") pod "1a54d701-bfc3-4f6e-acc3-b64b50e91d30" (UID: "1a54d701-bfc3-4f6e-acc3-b64b50e91d30"). InnerVolumeSpecName "kube-api-access-4kbvh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.627307 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1a54d701-bfc3-4f6e-acc3-b64b50e91d30" (UID: "1a54d701-bfc3-4f6e-acc3-b64b50e91d30"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.628193 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1a54d701-bfc3-4f6e-acc3-b64b50e91d30" (UID: "1a54d701-bfc3-4f6e-acc3-b64b50e91d30"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.633936 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-config" (OuterVolumeSpecName: "config") pod "1a54d701-bfc3-4f6e-acc3-b64b50e91d30" (UID: "1a54d701-bfc3-4f6e-acc3-b64b50e91d30"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.639289 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1a54d701-bfc3-4f6e-acc3-b64b50e91d30" (UID: "1a54d701-bfc3-4f6e-acc3-b64b50e91d30"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.687424 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.687478 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4kbvh\" (UniqueName: \"kubernetes.io/projected/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-kube-api-access-4kbvh\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.687492 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.687501 4706 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.687509 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a54d701-bfc3-4f6e-acc3-b64b50e91d30-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:15 crc kubenswrapper[4706]: E1206 05:46:15.784182 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified" Dec 06 05:46:15 crc kubenswrapper[4706]: E1206 05:46:15.784460 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n78h66dh56h78h9fh59dh564h65h5bfh589h575h7h676h548h676h75h55fh5b6h55bh5dch68fh5dh54dh5hbbh5ddh7h684h56bh665h8dh64dq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-j5sgs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(b78581a5-4314-4209-967f-715fa91ee6a7): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.820309 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.890764 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-glgr6\" (UniqueName: \"kubernetes.io/projected/b6b061c1-b58e-4c07-961f-150c13cd6d39-kube-api-access-glgr6\") pod \"b6b061c1-b58e-4c07-961f-150c13cd6d39\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.891519 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-fernet-keys\") pod \"b6b061c1-b58e-4c07-961f-150c13cd6d39\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.891592 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-config-data\") pod \"b6b061c1-b58e-4c07-961f-150c13cd6d39\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.891661 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-scripts\") pod \"b6b061c1-b58e-4c07-961f-150c13cd6d39\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.891695 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-credential-keys\") pod \"b6b061c1-b58e-4c07-961f-150c13cd6d39\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.892134 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-combined-ca-bundle\") pod \"b6b061c1-b58e-4c07-961f-150c13cd6d39\" (UID: \"b6b061c1-b58e-4c07-961f-150c13cd6d39\") " Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.897719 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6b061c1-b58e-4c07-961f-150c13cd6d39-kube-api-access-glgr6" (OuterVolumeSpecName: "kube-api-access-glgr6") pod "b6b061c1-b58e-4c07-961f-150c13cd6d39" (UID: "b6b061c1-b58e-4c07-961f-150c13cd6d39"). InnerVolumeSpecName "kube-api-access-glgr6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.898655 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "b6b061c1-b58e-4c07-961f-150c13cd6d39" (UID: "b6b061c1-b58e-4c07-961f-150c13cd6d39"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.899587 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-scripts" (OuterVolumeSpecName: "scripts") pod "b6b061c1-b58e-4c07-961f-150c13cd6d39" (UID: "b6b061c1-b58e-4c07-961f-150c13cd6d39"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.900420 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b6b061c1-b58e-4c07-961f-150c13cd6d39" (UID: "b6b061c1-b58e-4c07-961f-150c13cd6d39"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.919210 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b6b061c1-b58e-4c07-961f-150c13cd6d39" (UID: "b6b061c1-b58e-4c07-961f-150c13cd6d39"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.919618 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-config-data" (OuterVolumeSpecName: "config-data") pod "b6b061c1-b58e-4c07-961f-150c13cd6d39" (UID: "b6b061c1-b58e-4c07-961f-150c13cd6d39"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.994818 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-glgr6\" (UniqueName: \"kubernetes.io/projected/b6b061c1-b58e-4c07-961f-150c13cd6d39-kube-api-access-glgr6\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.994861 4706 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.994873 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.994881 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.994890 4706 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-credential-keys\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:15 crc kubenswrapper[4706]: I1206 05:46:15.994900 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6b061c1-b58e-4c07-961f-150c13cd6d39-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.044801 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-8p245" podUID="1a54d701-bfc3-4f6e-acc3-b64b50e91d30" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.108:5353: i/o timeout" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.302562 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-gmgr6"] Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.309204 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/keystone-bootstrap-gmgr6"] Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.316510 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-8p245" event={"ID":"1a54d701-bfc3-4f6e-acc3-b64b50e91d30","Type":"ContainerDied","Data":"50528f97c90e230dd4f6c61fd8a8fd896acfab6903d065f086d232de6a3062a9"} Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.316550 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-8p245" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.316561 4706 scope.go:117] "RemoveContainer" containerID="4a5d58f6418f0596d2f8097adc7df29b2a513be60819a058e4cd4d12db366016" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.318541 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7605f8e439c382d83dde4749bc8c90c16c4c16e232e647f3d08836c7a39c2928" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.318595 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-gmgr6" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.322013 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8f474c4b8-xgvj4" event={"ID":"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f","Type":"ContainerStarted","Data":"986f3fb70013c4ffa0b04b4d013887727671fff00e793994ba7cbb66cbbbc4e2"} Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.344954 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-8p245"] Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.352826 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-8p245"] Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.418710 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-7xkwl"] Dec 06 05:46:16 crc kubenswrapper[4706]: E1206 05:46:16.419231 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a54d701-bfc3-4f6e-acc3-b64b50e91d30" containerName="dnsmasq-dns" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.419248 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a54d701-bfc3-4f6e-acc3-b64b50e91d30" containerName="dnsmasq-dns" Dec 06 05:46:16 crc kubenswrapper[4706]: E1206 05:46:16.419277 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6b061c1-b58e-4c07-961f-150c13cd6d39" containerName="keystone-bootstrap" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.419289 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6b061c1-b58e-4c07-961f-150c13cd6d39" containerName="keystone-bootstrap" Dec 06 05:46:16 crc kubenswrapper[4706]: E1206 05:46:16.419302 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a54d701-bfc3-4f6e-acc3-b64b50e91d30" containerName="init" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.419309 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a54d701-bfc3-4f6e-acc3-b64b50e91d30" containerName="init" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.419538 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a54d701-bfc3-4f6e-acc3-b64b50e91d30" containerName="dnsmasq-dns" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.419563 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6b061c1-b58e-4c07-961f-150c13cd6d39" containerName="keystone-bootstrap" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 
05:46:16.420265 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.423881 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.424113 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.424610 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.426964 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.427615 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-jsz48" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.429289 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-7xkwl"] Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.512943 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xh4g7\" (UniqueName: \"kubernetes.io/projected/965980f7-e73f-450a-b431-61e071a0361f-kube-api-access-xh4g7\") pod \"keystone-bootstrap-7xkwl\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.512990 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-fernet-keys\") pod \"keystone-bootstrap-7xkwl\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.513033 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-credential-keys\") pod \"keystone-bootstrap-7xkwl\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.513082 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-combined-ca-bundle\") pod \"keystone-bootstrap-7xkwl\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.513103 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-config-data\") pod \"keystone-bootstrap-7xkwl\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.513126 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-scripts\") pod \"keystone-bootstrap-7xkwl\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.614324 
4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-fernet-keys\") pod \"keystone-bootstrap-7xkwl\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.614397 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-credential-keys\") pod \"keystone-bootstrap-7xkwl\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.614442 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-combined-ca-bundle\") pod \"keystone-bootstrap-7xkwl\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.614463 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-config-data\") pod \"keystone-bootstrap-7xkwl\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.614487 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-scripts\") pod \"keystone-bootstrap-7xkwl\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.614563 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xh4g7\" (UniqueName: \"kubernetes.io/projected/965980f7-e73f-450a-b431-61e071a0361f-kube-api-access-xh4g7\") pod \"keystone-bootstrap-7xkwl\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.920222 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xh4g7\" (UniqueName: \"kubernetes.io/projected/965980f7-e73f-450a-b431-61e071a0361f-kube-api-access-xh4g7\") pod \"keystone-bootstrap-7xkwl\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.920230 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-credential-keys\") pod \"keystone-bootstrap-7xkwl\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.920275 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-fernet-keys\") pod \"keystone-bootstrap-7xkwl\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.920326 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-scripts\") pod \"keystone-bootstrap-7xkwl\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.920495 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-config-data\") pod \"keystone-bootstrap-7xkwl\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:16 crc kubenswrapper[4706]: I1206 05:46:16.922966 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-combined-ca-bundle\") pod \"keystone-bootstrap-7xkwl\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:17 crc kubenswrapper[4706]: I1206 05:46:17.055950 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:18 crc kubenswrapper[4706]: I1206 05:46:18.047018 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a54d701-bfc3-4f6e-acc3-b64b50e91d30" path="/var/lib/kubelet/pods/1a54d701-bfc3-4f6e-acc3-b64b50e91d30/volumes" Dec 06 05:46:18 crc kubenswrapper[4706]: I1206 05:46:18.047952 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6b061c1-b58e-4c07-961f-150c13cd6d39" path="/var/lib/kubelet/pods/b6b061c1-b58e-4c07-961f-150c13cd6d39/volumes" Dec 06 05:46:28 crc kubenswrapper[4706]: E1206 05:46:28.646712 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Dec 06 05:46:28 crc kubenswrapper[4706]: E1206 05:46:28.647458 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db 
upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-br92b,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-bs4sf_openstack(2a7ff6dd-4101-4650-a9b5-af050055f631): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:46:28 crc kubenswrapper[4706]: E1206 05:46:28.648632 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-bs4sf" podUID="2a7ff6dd-4101-4650-a9b5-af050055f631" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.666498 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-594b46c997-9b6gw" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.675126 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-84f54757c7-wbhlc" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.685470 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-58b8554b5c-s766w" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.866630 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwtx8\" (UniqueName: \"kubernetes.io/projected/b5574c4c-5438-4eca-bf23-03972c42720a-kube-api-access-wwtx8\") pod \"b5574c4c-5438-4eca-bf23-03972c42720a\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") " Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.866712 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/18221185-8d46-4fa8-8a4c-5fdfba2ef814-horizon-secret-key\") pod \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.866797 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18221185-8d46-4fa8-8a4c-5fdfba2ef814-logs\") pod \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.866837 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/18221185-8d46-4fa8-8a4c-5fdfba2ef814-scripts\") pod \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.866874 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b5574c4c-5438-4eca-bf23-03972c42720a-scripts\") pod \"b5574c4c-5438-4eca-bf23-03972c42720a\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") " Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.866901 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8115e0ca-7198-41ac-bed9-5186d95819c9-logs\") pod \"8115e0ca-7198-41ac-bed9-5186d95819c9\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") " Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.866928 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b5574c4c-5438-4eca-bf23-03972c42720a-horizon-secret-key\") pod \"b5574c4c-5438-4eca-bf23-03972c42720a\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") " Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.866971 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8115e0ca-7198-41ac-bed9-5186d95819c9-config-data\") pod \"8115e0ca-7198-41ac-bed9-5186d95819c9\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") " Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.867032 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8115e0ca-7198-41ac-bed9-5186d95819c9-scripts\") pod \"8115e0ca-7198-41ac-bed9-5186d95819c9\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") " Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.867107 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-67dlv\" (UniqueName: \"kubernetes.io/projected/8115e0ca-7198-41ac-bed9-5186d95819c9-kube-api-access-67dlv\") pod \"8115e0ca-7198-41ac-bed9-5186d95819c9\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") 
" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.867154 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8115e0ca-7198-41ac-bed9-5186d95819c9-horizon-secret-key\") pod \"8115e0ca-7198-41ac-bed9-5186d95819c9\" (UID: \"8115e0ca-7198-41ac-bed9-5186d95819c9\") " Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.867205 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tbwj6\" (UniqueName: \"kubernetes.io/projected/18221185-8d46-4fa8-8a4c-5fdfba2ef814-kube-api-access-tbwj6\") pod \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.867236 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b5574c4c-5438-4eca-bf23-03972c42720a-config-data\") pod \"b5574c4c-5438-4eca-bf23-03972c42720a\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") " Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.867274 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/18221185-8d46-4fa8-8a4c-5fdfba2ef814-config-data\") pod \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\" (UID: \"18221185-8d46-4fa8-8a4c-5fdfba2ef814\") " Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.867296 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5574c4c-5438-4eca-bf23-03972c42720a-logs\") pod \"b5574c4c-5438-4eca-bf23-03972c42720a\" (UID: \"b5574c4c-5438-4eca-bf23-03972c42720a\") " Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.867971 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5574c4c-5438-4eca-bf23-03972c42720a-logs" (OuterVolumeSpecName: "logs") pod "b5574c4c-5438-4eca-bf23-03972c42720a" (UID: "b5574c4c-5438-4eca-bf23-03972c42720a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.868124 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/18221185-8d46-4fa8-8a4c-5fdfba2ef814-logs" (OuterVolumeSpecName: "logs") pod "18221185-8d46-4fa8-8a4c-5fdfba2ef814" (UID: "18221185-8d46-4fa8-8a4c-5fdfba2ef814"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.868250 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5574c4c-5438-4eca-bf23-03972c42720a-scripts" (OuterVolumeSpecName: "scripts") pod "b5574c4c-5438-4eca-bf23-03972c42720a" (UID: "b5574c4c-5438-4eca-bf23-03972c42720a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.868577 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/18221185-8d46-4fa8-8a4c-5fdfba2ef814-scripts" (OuterVolumeSpecName: "scripts") pod "18221185-8d46-4fa8-8a4c-5fdfba2ef814" (UID: "18221185-8d46-4fa8-8a4c-5fdfba2ef814"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.868691 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8115e0ca-7198-41ac-bed9-5186d95819c9-config-data" (OuterVolumeSpecName: "config-data") pod "8115e0ca-7198-41ac-bed9-5186d95819c9" (UID: "8115e0ca-7198-41ac-bed9-5186d95819c9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.868989 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8115e0ca-7198-41ac-bed9-5186d95819c9-logs" (OuterVolumeSpecName: "logs") pod "8115e0ca-7198-41ac-bed9-5186d95819c9" (UID: "8115e0ca-7198-41ac-bed9-5186d95819c9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.870143 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5574c4c-5438-4eca-bf23-03972c42720a-config-data" (OuterVolumeSpecName: "config-data") pod "b5574c4c-5438-4eca-bf23-03972c42720a" (UID: "b5574c4c-5438-4eca-bf23-03972c42720a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.870246 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8115e0ca-7198-41ac-bed9-5186d95819c9-scripts" (OuterVolumeSpecName: "scripts") pod "8115e0ca-7198-41ac-bed9-5186d95819c9" (UID: "8115e0ca-7198-41ac-bed9-5186d95819c9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.870881 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/18221185-8d46-4fa8-8a4c-5fdfba2ef814-config-data" (OuterVolumeSpecName: "config-data") pod "18221185-8d46-4fa8-8a4c-5fdfba2ef814" (UID: "18221185-8d46-4fa8-8a4c-5fdfba2ef814"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.873259 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8115e0ca-7198-41ac-bed9-5186d95819c9-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "8115e0ca-7198-41ac-bed9-5186d95819c9" (UID: "8115e0ca-7198-41ac-bed9-5186d95819c9"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.873452 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5574c4c-5438-4eca-bf23-03972c42720a-kube-api-access-wwtx8" (OuterVolumeSpecName: "kube-api-access-wwtx8") pod "b5574c4c-5438-4eca-bf23-03972c42720a" (UID: "b5574c4c-5438-4eca-bf23-03972c42720a"). InnerVolumeSpecName "kube-api-access-wwtx8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.874896 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8115e0ca-7198-41ac-bed9-5186d95819c9-kube-api-access-67dlv" (OuterVolumeSpecName: "kube-api-access-67dlv") pod "8115e0ca-7198-41ac-bed9-5186d95819c9" (UID: "8115e0ca-7198-41ac-bed9-5186d95819c9"). InnerVolumeSpecName "kube-api-access-67dlv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.880587 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18221185-8d46-4fa8-8a4c-5fdfba2ef814-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "18221185-8d46-4fa8-8a4c-5fdfba2ef814" (UID: "18221185-8d46-4fa8-8a4c-5fdfba2ef814"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.881393 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5574c4c-5438-4eca-bf23-03972c42720a-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "b5574c4c-5438-4eca-bf23-03972c42720a" (UID: "b5574c4c-5438-4eca-bf23-03972c42720a"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.886185 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/18221185-8d46-4fa8-8a4c-5fdfba2ef814-kube-api-access-tbwj6" (OuterVolumeSpecName: "kube-api-access-tbwj6") pod "18221185-8d46-4fa8-8a4c-5fdfba2ef814" (UID: "18221185-8d46-4fa8-8a4c-5fdfba2ef814"). InnerVolumeSpecName "kube-api-access-tbwj6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.969511 4706 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/b5574c4c-5438-4eca-bf23-03972c42720a-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.969547 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8115e0ca-7198-41ac-bed9-5186d95819c9-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.969557 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8115e0ca-7198-41ac-bed9-5186d95819c9-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.969566 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-67dlv\" (UniqueName: \"kubernetes.io/projected/8115e0ca-7198-41ac-bed9-5186d95819c9-kube-api-access-67dlv\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.969577 4706 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8115e0ca-7198-41ac-bed9-5186d95819c9-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.969585 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tbwj6\" (UniqueName: \"kubernetes.io/projected/18221185-8d46-4fa8-8a4c-5fdfba2ef814-kube-api-access-tbwj6\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.969594 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b5574c4c-5438-4eca-bf23-03972c42720a-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.969603 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/18221185-8d46-4fa8-8a4c-5fdfba2ef814-config-data\") on node \"crc\" 
DevicePath \"\"" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.969612 4706 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5574c4c-5438-4eca-bf23-03972c42720a-logs\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.969619 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwtx8\" (UniqueName: \"kubernetes.io/projected/b5574c4c-5438-4eca-bf23-03972c42720a-kube-api-access-wwtx8\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.969627 4706 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/18221185-8d46-4fa8-8a4c-5fdfba2ef814-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.969635 4706 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/18221185-8d46-4fa8-8a4c-5fdfba2ef814-logs\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.969645 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/18221185-8d46-4fa8-8a4c-5fdfba2ef814-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.969653 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/b5574c4c-5438-4eca-bf23-03972c42720a-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:28 crc kubenswrapper[4706]: I1206 05:46:28.969661 4706 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8115e0ca-7198-41ac-bed9-5186d95819c9-logs\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:29 crc kubenswrapper[4706]: I1206 05:46:29.435541 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-594b46c997-9b6gw" Dec 06 05:46:29 crc kubenswrapper[4706]: I1206 05:46:29.435546 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-594b46c997-9b6gw" event={"ID":"18221185-8d46-4fa8-8a4c-5fdfba2ef814","Type":"ContainerDied","Data":"66c9d22a4ba6156b5383b6a64344ae27eace99b5a7a9ba604925ece99838ed19"} Dec 06 05:46:29 crc kubenswrapper[4706]: I1206 05:46:29.436784 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84f54757c7-wbhlc" event={"ID":"8115e0ca-7198-41ac-bed9-5186d95819c9","Type":"ContainerDied","Data":"1ff83f11e54a05e27d05137858faf59adfc08dd66090ab9cd45c6498c50351d7"} Dec 06 05:46:29 crc kubenswrapper[4706]: I1206 05:46:29.436886 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-84f54757c7-wbhlc" Dec 06 05:46:29 crc kubenswrapper[4706]: I1206 05:46:29.440919 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-58b8554b5c-s766w" event={"ID":"b5574c4c-5438-4eca-bf23-03972c42720a","Type":"ContainerDied","Data":"875c70a0e43b5f3d685b351b3a4f4ab6b8ba54b881f25eb44d55e8908339f40a"} Dec 06 05:46:29 crc kubenswrapper[4706]: I1206 05:46:29.440973 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-58b8554b5c-s766w" Dec 06 05:46:29 crc kubenswrapper[4706]: E1206 05:46:29.443494 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-bs4sf" podUID="2a7ff6dd-4101-4650-a9b5-af050055f631" Dec 06 05:46:29 crc kubenswrapper[4706]: I1206 05:46:29.521405 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-594b46c997-9b6gw"] Dec 06 05:46:29 crc kubenswrapper[4706]: I1206 05:46:29.527743 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-594b46c997-9b6gw"] Dec 06 05:46:29 crc kubenswrapper[4706]: I1206 05:46:29.548776 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-84f54757c7-wbhlc"] Dec 06 05:46:29 crc kubenswrapper[4706]: I1206 05:46:29.558167 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-84f54757c7-wbhlc"] Dec 06 05:46:29 crc kubenswrapper[4706]: I1206 05:46:29.577146 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-58b8554b5c-s766w"] Dec 06 05:46:29 crc kubenswrapper[4706]: I1206 05:46:29.585094 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-58b8554b5c-s766w"] Dec 06 05:46:29 crc kubenswrapper[4706]: E1206 05:46:29.754075 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Dec 06 05:46:29 crc kubenswrapper[4706]: E1206 05:46:29.754254 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s57cs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-pjt6m_openstack(bc067583-4394-4fa3-86fc-d6e626ec0f18): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:46:29 crc kubenswrapper[4706]: E1206 05:46:29.755471 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-pjt6m" podUID="bc067583-4394-4fa3-86fc-d6e626ec0f18" Dec 06 05:46:29 crc kubenswrapper[4706]: E1206 05:46:29.764992 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Dec 06 05:46:29 crc kubenswrapper[4706]: E1206 05:46:29.765023 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Dec 06 05:46:29 crc kubenswrapper[4706]: E1206 05:46:29.764992 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-swift-container:current-podified" Dec 06 05:46:29 crc kubenswrapper[4706]: E1206 05:46:29.765139 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8jr6c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-mzfvf_openstack(4f0ced3b-4b02-4ce1-935a-af7cc2e01346): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:46:29 crc kubenswrapper[4706]: E1206 05:46:29.765143 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n67h9bh57h55h65bh58hfh696h64ch59fh86h59h546h6dh658h66ch567h5c7h658h574h67dh58fh5b7h5d8h6h597h67bh679h67chdch5b5h564q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fxl84,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-7f979b84f6-hzq85_openstack(ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:46:29 crc kubenswrapper[4706]: E1206 05:46:29.765241 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:container-server,Image:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,Command:[/usr/bin/swift-container-server /etc/swift/container-server.conf.d 
-v],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:container,HostPort:0,ContainerPort:6201,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5b7h56h9dh94h67bh697h95h55hbh555h556h675h5fdh57dh579h5fbh64fh5c9h687hb6h678h5d4h549h54h98h8ch564h5bh5bch55dhc8hf8q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:swift,ReadOnly:false,MountPath:/srv/node/pv,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:etc-swift,ReadOnly:false,MountPath:/etc/swift,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:cache,ReadOnly:false,MountPath:/var/cache/swift,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:lock,ReadOnly:false,MountPath:/var/lock,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-b2szw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42445,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-storage-0_openstack(91f74906-ec70-4b0c-a657-d075d18f488b): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:46:29 crc kubenswrapper[4706]: E1206 05:46:29.766385 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-mzfvf" podUID="4f0ced3b-4b02-4ce1-935a-af7cc2e01346" Dec 06 05:46:29 crc kubenswrapper[4706]: E1206 05:46:29.766950 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-7f979b84f6-hzq85" podUID="ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" Dec 06 05:46:29 crc kubenswrapper[4706]: I1206 05:46:29.778282 4706 scope.go:117] "RemoveContainer" containerID="4d1722cc4d9af4ad56f634aa9cf0e2ff148679f7d4baeea91160b6fb76f8b4d3" Dec 06 05:46:29 crc kubenswrapper[4706]: E1206 05:46:29.778653 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"container-server\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"container-replicator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-auditor\" with ImagePullBackOff: 
\"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-updater\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\"]" pod="openstack/swift-storage-0" podUID="91f74906-ec70-4b0c-a657-d075d18f488b" Dec 06 05:46:29 crc kubenswrapper[4706]: E1206 05:46:29.831324 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Dec 06 05:46:29 crc kubenswrapper[4706]: E1206 05:46:29.831663 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8z98j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-csbkx_openstack(7bec3465-219b-4c57-83a9-aed4c78d1483): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 05:46:29 crc kubenswrapper[4706]: E1206 05:46:29.833509 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc 
error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-csbkx" podUID="7bec3465-219b-4c57-83a9-aed4c78d1483" Dec 06 05:46:30 crc kubenswrapper[4706]: I1206 05:46:30.045410 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18221185-8d46-4fa8-8a4c-5fdfba2ef814" path="/var/lib/kubelet/pods/18221185-8d46-4fa8-8a4c-5fdfba2ef814/volumes" Dec 06 05:46:30 crc kubenswrapper[4706]: I1206 05:46:30.046451 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8115e0ca-7198-41ac-bed9-5186d95819c9" path="/var/lib/kubelet/pods/8115e0ca-7198-41ac-bed9-5186d95819c9/volumes" Dec 06 05:46:30 crc kubenswrapper[4706]: I1206 05:46:30.047068 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5574c4c-5438-4eca-bf23-03972c42720a" path="/var/lib/kubelet/pods/b5574c4c-5438-4eca-bf23-03972c42720a/volumes" Dec 06 05:46:30 crc kubenswrapper[4706]: I1206 05:46:30.197006 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-7xkwl"] Dec 06 05:46:30 crc kubenswrapper[4706]: W1206 05:46:30.397142 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod965980f7_e73f_450a_b431_61e071a0361f.slice/crio-edd191991da379c1f2797d1d37f562d566f27908de449f364c68495675855c98 WatchSource:0}: Error finding container edd191991da379c1f2797d1d37f562d566f27908de449f364c68495675855c98: Status 404 returned error can't find the container with id edd191991da379c1f2797d1d37f562d566f27908de449f364c68495675855c98 Dec 06 05:46:30 crc kubenswrapper[4706]: I1206 05:46:30.451957 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7xkwl" event={"ID":"965980f7-e73f-450a-b431-61e071a0361f","Type":"ContainerStarted","Data":"edd191991da379c1f2797d1d37f562d566f27908de449f364c68495675855c98"} Dec 06 05:46:30 crc kubenswrapper[4706]: E1206 05:46:30.770738 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-csbkx" podUID="7bec3465-219b-4c57-83a9-aed4c78d1483" Dec 06 05:46:31 crc kubenswrapper[4706]: E1206 05:46:31.092694 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"container-server\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-replicator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-auditor\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-updater\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\"]" pod="openstack/swift-storage-0" podUID="91f74906-ec70-4b0c-a657-d075d18f488b" Dec 06 05:46:31 crc kubenswrapper[4706]: I1206 05:46:31.465010 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f979b84f6-hzq85" 
event={"ID":"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf","Type":"ContainerStarted","Data":"5a6bfebf319d4eb3f9bbfd4b2c9f92dbf46c782a519f3895cca98b4760ce3f3b"} Dec 06 05:46:31 crc kubenswrapper[4706]: I1206 05:46:31.467651 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8f474c4b8-xgvj4" event={"ID":"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f","Type":"ContainerStarted","Data":"58056e9e8c51b9987f00a42e85f33973d87d8fcadacf9f36dd7e5d0c33a24aed"} Dec 06 05:46:31 crc kubenswrapper[4706]: I1206 05:46:31.475184 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7xkwl" event={"ID":"965980f7-e73f-450a-b431-61e071a0361f","Type":"ContainerStarted","Data":"bca96de5277a5c6bc2de53305b2e4cdf26ffdbbc140afb17297974ae84b20815"} Dec 06 05:46:31 crc kubenswrapper[4706]: I1206 05:46:31.477929 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b78581a5-4314-4209-967f-715fa91ee6a7","Type":"ContainerStarted","Data":"3e418495c0765bf929d12275fccecfa54d63852330f9665591cf76176cc4eaab"} Dec 06 05:46:31 crc kubenswrapper[4706]: I1206 05:46:31.497417 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-7xkwl" podStartSLOduration=15.497401423 podStartE2EDuration="15.497401423s" podCreationTimestamp="2025-12-06 05:46:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:46:31.494301379 +0000 UTC m=+1613.822125333" watchObservedRunningTime="2025-12-06 05:46:31.497401423 +0000 UTC m=+1613.825225367" Dec 06 05:46:32 crc kubenswrapper[4706]: I1206 05:46:32.489266 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f979b84f6-hzq85" event={"ID":"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf","Type":"ContainerStarted","Data":"4690ec4d5b8ee63e514a007ab5967eb43caa526a3ad17a05261a250f9e8efa62"} Dec 06 05:46:32 crc kubenswrapper[4706]: I1206 05:46:32.491467 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-8f474c4b8-xgvj4" event={"ID":"8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f","Type":"ContainerStarted","Data":"5a6d99378a91b9a7c3fd9188f3a1bab1324e872af5c02059dca07b82575e275f"} Dec 06 05:46:32 crc kubenswrapper[4706]: I1206 05:46:32.538472 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-8f474c4b8-xgvj4" podStartSLOduration=28.836450007 podStartE2EDuration="44.538453066s" podCreationTimestamp="2025-12-06 05:45:48 +0000 UTC" firstStartedPulling="2025-12-06 05:46:15.389726889 +0000 UTC m=+1597.717550833" lastFinishedPulling="2025-12-06 05:46:31.091729938 +0000 UTC m=+1613.419553892" observedRunningTime="2025-12-06 05:46:32.537723286 +0000 UTC m=+1614.865547240" watchObservedRunningTime="2025-12-06 05:46:32.538453066 +0000 UTC m=+1614.866277010" Dec 06 05:46:32 crc kubenswrapper[4706]: I1206 05:46:32.546973 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-7f979b84f6-hzq85" podStartSLOduration=-9223371992.307833 podStartE2EDuration="44.546942356s" podCreationTimestamp="2025-12-06 05:45:48 +0000 UTC" firstStartedPulling="2025-12-06 05:46:11.25266124 +0000 UTC m=+1593.580485224" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:46:32.520212093 +0000 UTC m=+1614.848036027" watchObservedRunningTime="2025-12-06 05:46:32.546942356 +0000 UTC m=+1614.874766320" Dec 06 05:46:36 crc kubenswrapper[4706]: I1206 05:46:36.524014 4706 
generic.go:334] "Generic (PLEG): container finished" podID="965980f7-e73f-450a-b431-61e071a0361f" containerID="bca96de5277a5c6bc2de53305b2e4cdf26ffdbbc140afb17297974ae84b20815" exitCode=0 Dec 06 05:46:36 crc kubenswrapper[4706]: I1206 05:46:36.524084 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7xkwl" event={"ID":"965980f7-e73f-450a-b431-61e071a0361f","Type":"ContainerDied","Data":"bca96de5277a5c6bc2de53305b2e4cdf26ffdbbc140afb17297974ae84b20815"} Dec 06 05:46:38 crc kubenswrapper[4706]: I1206 05:46:38.985843 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:46:38 crc kubenswrapper[4706]: I1206 05:46:38.987191 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:46:39 crc kubenswrapper[4706]: I1206 05:46:39.049174 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:46:39 crc kubenswrapper[4706]: I1206 05:46:39.049960 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.352382 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.547302 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-scripts\") pod \"965980f7-e73f-450a-b431-61e071a0361f\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.548037 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xh4g7\" (UniqueName: \"kubernetes.io/projected/965980f7-e73f-450a-b431-61e071a0361f-kube-api-access-xh4g7\") pod \"965980f7-e73f-450a-b431-61e071a0361f\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.548157 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-fernet-keys\") pod \"965980f7-e73f-450a-b431-61e071a0361f\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.548765 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-credential-keys\") pod \"965980f7-e73f-450a-b431-61e071a0361f\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.548858 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-combined-ca-bundle\") pod \"965980f7-e73f-450a-b431-61e071a0361f\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.548914 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-config-data\") pod \"965980f7-e73f-450a-b431-61e071a0361f\" (UID: \"965980f7-e73f-450a-b431-61e071a0361f\") " Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 
05:46:42.553023 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/965980f7-e73f-450a-b431-61e071a0361f-kube-api-access-xh4g7" (OuterVolumeSpecName: "kube-api-access-xh4g7") pod "965980f7-e73f-450a-b431-61e071a0361f" (UID: "965980f7-e73f-450a-b431-61e071a0361f"). InnerVolumeSpecName "kube-api-access-xh4g7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.552374 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-scripts" (OuterVolumeSpecName: "scripts") pod "965980f7-e73f-450a-b431-61e071a0361f" (UID: "965980f7-e73f-450a-b431-61e071a0361f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.553413 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "965980f7-e73f-450a-b431-61e071a0361f" (UID: "965980f7-e73f-450a-b431-61e071a0361f"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.557254 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "965980f7-e73f-450a-b431-61e071a0361f" (UID: "965980f7-e73f-450a-b431-61e071a0361f"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.573833 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-config-data" (OuterVolumeSpecName: "config-data") pod "965980f7-e73f-450a-b431-61e071a0361f" (UID: "965980f7-e73f-450a-b431-61e071a0361f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.585654 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-7xkwl" event={"ID":"965980f7-e73f-450a-b431-61e071a0361f","Type":"ContainerDied","Data":"edd191991da379c1f2797d1d37f562d566f27908de449f364c68495675855c98"} Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.586891 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="edd191991da379c1f2797d1d37f562d566f27908de449f364c68495675855c98" Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.585717 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-7xkwl" Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.588071 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "965980f7-e73f-450a-b431-61e071a0361f" (UID: "965980f7-e73f-450a-b431-61e071a0361f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.651712 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.651747 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.651760 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.651772 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xh4g7\" (UniqueName: \"kubernetes.io/projected/965980f7-e73f-450a-b431-61e071a0361f-kube-api-access-xh4g7\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.651783 4706 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:42 crc kubenswrapper[4706]: I1206 05:46:42.651794 4706 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/965980f7-e73f-450a-b431-61e071a0361f-credential-keys\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.461926 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-6c6f7f7c88-ptmf7"] Dec 06 05:46:43 crc kubenswrapper[4706]: E1206 05:46:43.462593 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="965980f7-e73f-450a-b431-61e071a0361f" containerName="keystone-bootstrap" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.462605 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="965980f7-e73f-450a-b431-61e071a0361f" containerName="keystone-bootstrap" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.462787 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="965980f7-e73f-450a-b431-61e071a0361f" containerName="keystone-bootstrap" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.463351 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.465933 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.467159 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-jsz48" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.469029 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.469058 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.469076 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.472767 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.482427 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6c6f7f7c88-ptmf7"] Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.569550 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-public-tls-certs\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.569633 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-scripts\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.569710 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-combined-ca-bundle\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.569923 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-config-data\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.569973 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-fernet-keys\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.570000 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-credential-keys\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: 
\"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.570080 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-internal-tls-certs\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.570131 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnlwn\" (UniqueName: \"kubernetes.io/projected/0cbad2bc-87d3-4f51-aed8-36d386af56eb-kube-api-access-wnlwn\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.594879 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b78581a5-4314-4209-967f-715fa91ee6a7","Type":"ContainerStarted","Data":"65315d027723533d8bce7becb1dfb4a37263a5442e8bf46b7c6775f2cb032184"} Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.596801 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-bs4sf" event={"ID":"2a7ff6dd-4101-4650-a9b5-af050055f631","Type":"ContainerStarted","Data":"607fcf298a4deeae981f6df41e2639129c5e0b5fa0542c0178fbfeb81fd05716"} Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.618000 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-bs4sf" podStartSLOduration=2.755127855 podStartE2EDuration="1m3.617982238s" podCreationTimestamp="2025-12-06 05:45:40 +0000 UTC" firstStartedPulling="2025-12-06 05:45:41.618219063 +0000 UTC m=+1563.946043007" lastFinishedPulling="2025-12-06 05:46:42.481073446 +0000 UTC m=+1624.808897390" observedRunningTime="2025-12-06 05:46:43.613523927 +0000 UTC m=+1625.941347861" watchObservedRunningTime="2025-12-06 05:46:43.617982238 +0000 UTC m=+1625.945806182" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.671965 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-config-data\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.672016 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-fernet-keys\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.672038 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-credential-keys\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.672120 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-internal-tls-certs\") pod 
\"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.672156 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnlwn\" (UniqueName: \"kubernetes.io/projected/0cbad2bc-87d3-4f51-aed8-36d386af56eb-kube-api-access-wnlwn\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.672248 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-public-tls-certs\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.672289 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-scripts\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.672325 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-combined-ca-bundle\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.677916 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-public-tls-certs\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.677964 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-internal-tls-certs\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.678445 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-credential-keys\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.685289 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-config-data\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.686221 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-scripts\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: 
I1206 05:46:43.688156 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-combined-ca-bundle\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.695958 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnlwn\" (UniqueName: \"kubernetes.io/projected/0cbad2bc-87d3-4f51-aed8-36d386af56eb-kube-api-access-wnlwn\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.699314 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0cbad2bc-87d3-4f51-aed8-36d386af56eb-fernet-keys\") pod \"keystone-6c6f7f7c88-ptmf7\" (UID: \"0cbad2bc-87d3-4f51-aed8-36d386af56eb\") " pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:43 crc kubenswrapper[4706]: I1206 05:46:43.781019 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:44 crc kubenswrapper[4706]: E1206 05:46:44.038243 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-mzfvf" podUID="4f0ced3b-4b02-4ce1-935a-af7cc2e01346" Dec 06 05:46:44 crc kubenswrapper[4706]: E1206 05:46:44.038770 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-pjt6m" podUID="bc067583-4394-4fa3-86fc-d6e626ec0f18" Dec 06 05:46:44 crc kubenswrapper[4706]: I1206 05:46:44.217580 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6c6f7f7c88-ptmf7"] Dec 06 05:46:44 crc kubenswrapper[4706]: I1206 05:46:44.605854 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6c6f7f7c88-ptmf7" event={"ID":"0cbad2bc-87d3-4f51-aed8-36d386af56eb","Type":"ContainerStarted","Data":"14583f2494f2c70d6e34c85baf6b88be5c3944aff8cfa5be38e36bd880ad2b4c"} Dec 06 05:46:45 crc kubenswrapper[4706]: E1206 05:46:45.042152 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"container-server\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-replicator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-auditor\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\", failed to \"StartContainer\" for \"container-updater\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-swift-container:current-podified\\\"\"]" pod="openstack/swift-storage-0" podUID="91f74906-ec70-4b0c-a657-d075d18f488b" Dec 06 05:46:45 crc kubenswrapper[4706]: I1206 
05:46:45.615632 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6c6f7f7c88-ptmf7" event={"ID":"0cbad2bc-87d3-4f51-aed8-36d386af56eb","Type":"ContainerStarted","Data":"ff2007479f170e37ca24cecd507d7d96212e6d0689b3f7a83b62b503349b94dc"} Dec 06 05:46:45 crc kubenswrapper[4706]: I1206 05:46:45.616342 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-6c6f7f7c88-ptmf7" Dec 06 05:46:45 crc kubenswrapper[4706]: I1206 05:46:45.639857 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-6c6f7f7c88-ptmf7" podStartSLOduration=2.639834486 podStartE2EDuration="2.639834486s" podCreationTimestamp="2025-12-06 05:46:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:46:45.63333616 +0000 UTC m=+1627.961160114" watchObservedRunningTime="2025-12-06 05:46:45.639834486 +0000 UTC m=+1627.967658440" Dec 06 05:46:48 crc kubenswrapper[4706]: I1206 05:46:48.987936 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7f979b84f6-hzq85" podUID="ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.142:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.142:8443: connect: connection refused" Dec 06 05:46:49 crc kubenswrapper[4706]: I1206 05:46:49.050149 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-8f474c4b8-xgvj4" podUID="8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused" Dec 06 05:46:54 crc kubenswrapper[4706]: I1206 05:46:54.721877 4706 generic.go:334] "Generic (PLEG): container finished" podID="2a7ff6dd-4101-4650-a9b5-af050055f631" containerID="607fcf298a4deeae981f6df41e2639129c5e0b5fa0542c0178fbfeb81fd05716" exitCode=0 Dec 06 05:46:54 crc kubenswrapper[4706]: I1206 05:46:54.721939 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-bs4sf" event={"ID":"2a7ff6dd-4101-4650-a9b5-af050055f631","Type":"ContainerDied","Data":"607fcf298a4deeae981f6df41e2639129c5e0b5fa0542c0178fbfeb81fd05716"} Dec 06 05:46:55 crc kubenswrapper[4706]: E1206 05:46:55.555598 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="b78581a5-4314-4209-967f-715fa91ee6a7" Dec 06 05:46:55 crc kubenswrapper[4706]: I1206 05:46:55.735244 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b78581a5-4314-4209-967f-715fa91ee6a7" containerName="ceilometer-notification-agent" containerID="cri-o://3e418495c0765bf929d12275fccecfa54d63852330f9665591cf76176cc4eaab" gracePeriod=30 Dec 06 05:46:55 crc kubenswrapper[4706]: I1206 05:46:55.735563 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b78581a5-4314-4209-967f-715fa91ee6a7","Type":"ContainerStarted","Data":"3b3c7e3507ff1c7df96a4d07363401332810964ed37e227589edec0ce08801e4"} Dec 06 05:46:55 crc kubenswrapper[4706]: I1206 05:46:55.735606 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 06 05:46:55 crc 
kubenswrapper[4706]: I1206 05:46:55.735644 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b78581a5-4314-4209-967f-715fa91ee6a7" containerName="proxy-httpd" containerID="cri-o://3b3c7e3507ff1c7df96a4d07363401332810964ed37e227589edec0ce08801e4" gracePeriod=30 Dec 06 05:46:55 crc kubenswrapper[4706]: I1206 05:46:55.735714 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b78581a5-4314-4209-967f-715fa91ee6a7" containerName="sg-core" containerID="cri-o://65315d027723533d8bce7becb1dfb4a37263a5442e8bf46b7c6775f2cb032184" gracePeriod=30 Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.003307 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-bs4sf" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.098169 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2a7ff6dd-4101-4650-a9b5-af050055f631-db-sync-config-data\") pod \"2a7ff6dd-4101-4650-a9b5-af050055f631\" (UID: \"2a7ff6dd-4101-4650-a9b5-af050055f631\") " Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.098500 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a7ff6dd-4101-4650-a9b5-af050055f631-combined-ca-bundle\") pod \"2a7ff6dd-4101-4650-a9b5-af050055f631\" (UID: \"2a7ff6dd-4101-4650-a9b5-af050055f631\") " Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.098578 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-br92b\" (UniqueName: \"kubernetes.io/projected/2a7ff6dd-4101-4650-a9b5-af050055f631-kube-api-access-br92b\") pod \"2a7ff6dd-4101-4650-a9b5-af050055f631\" (UID: \"2a7ff6dd-4101-4650-a9b5-af050055f631\") " Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.105103 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a7ff6dd-4101-4650-a9b5-af050055f631-kube-api-access-br92b" (OuterVolumeSpecName: "kube-api-access-br92b") pod "2a7ff6dd-4101-4650-a9b5-af050055f631" (UID: "2a7ff6dd-4101-4650-a9b5-af050055f631"). InnerVolumeSpecName "kube-api-access-br92b". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.105236 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a7ff6dd-4101-4650-a9b5-af050055f631-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "2a7ff6dd-4101-4650-a9b5-af050055f631" (UID: "2a7ff6dd-4101-4650-a9b5-af050055f631"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.130508 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a7ff6dd-4101-4650-a9b5-af050055f631-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2a7ff6dd-4101-4650-a9b5-af050055f631" (UID: "2a7ff6dd-4101-4650-a9b5-af050055f631"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.200354 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-br92b\" (UniqueName: \"kubernetes.io/projected/2a7ff6dd-4101-4650-a9b5-af050055f631-kube-api-access-br92b\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.200591 4706 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/2a7ff6dd-4101-4650-a9b5-af050055f631-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.200672 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2a7ff6dd-4101-4650-a9b5-af050055f631-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.744588 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-bs4sf" event={"ID":"2a7ff6dd-4101-4650-a9b5-af050055f631","Type":"ContainerDied","Data":"93ffc157166b6aaf28c61dc6c82844d0321f12ef16a0d61aa7d6b891ce3b6038"} Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.744918 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="93ffc157166b6aaf28c61dc6c82844d0321f12ef16a0d61aa7d6b891ce3b6038" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.744603 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-bs4sf" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.746221 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-csbkx" event={"ID":"7bec3465-219b-4c57-83a9-aed4c78d1483","Type":"ContainerStarted","Data":"8ac0b108392f0f9075ab778bb7034eb663c81084180d87d2ff18e5f093aadb51"} Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.748185 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-pjt6m" event={"ID":"bc067583-4394-4fa3-86fc-d6e626ec0f18","Type":"ContainerStarted","Data":"f4c3a5a604c3d6b739b1cdb3098d206b09058eebb8b42cf5898c707a28617b45"} Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.752391 4706 generic.go:334] "Generic (PLEG): container finished" podID="b78581a5-4314-4209-967f-715fa91ee6a7" containerID="3b3c7e3507ff1c7df96a4d07363401332810964ed37e227589edec0ce08801e4" exitCode=0 Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.752435 4706 generic.go:334] "Generic (PLEG): container finished" podID="b78581a5-4314-4209-967f-715fa91ee6a7" containerID="65315d027723533d8bce7becb1dfb4a37263a5442e8bf46b7c6775f2cb032184" exitCode=2 Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.752461 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b78581a5-4314-4209-967f-715fa91ee6a7","Type":"ContainerDied","Data":"3b3c7e3507ff1c7df96a4d07363401332810964ed37e227589edec0ce08801e4"} Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.752490 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b78581a5-4314-4209-967f-715fa91ee6a7","Type":"ContainerDied","Data":"65315d027723533d8bce7becb1dfb4a37263a5442e8bf46b7c6775f2cb032184"} Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.775528 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-csbkx" podStartSLOduration=6.92518971 podStartE2EDuration="1m16.775509722s" 
podCreationTimestamp="2025-12-06 05:45:40 +0000 UTC" firstStartedPulling="2025-12-06 05:45:41.233247439 +0000 UTC m=+1563.561071383" lastFinishedPulling="2025-12-06 05:46:51.083567451 +0000 UTC m=+1633.411391395" observedRunningTime="2025-12-06 05:46:56.768992506 +0000 UTC m=+1639.096816450" watchObservedRunningTime="2025-12-06 05:46:56.775509722 +0000 UTC m=+1639.103333676" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.798519 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-pjt6m" podStartSLOduration=2.59017813 podStartE2EDuration="1m47.798500733s" podCreationTimestamp="2025-12-06 05:45:09 +0000 UTC" firstStartedPulling="2025-12-06 05:45:10.343606415 +0000 UTC m=+1532.671430359" lastFinishedPulling="2025-12-06 05:46:55.551929018 +0000 UTC m=+1637.879752962" observedRunningTime="2025-12-06 05:46:56.792971044 +0000 UTC m=+1639.120795068" watchObservedRunningTime="2025-12-06 05:46:56.798500733 +0000 UTC m=+1639.126324677" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.888389 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-99698bc47-f5twk"] Dec 06 05:46:56 crc kubenswrapper[4706]: E1206 05:46:56.888746 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a7ff6dd-4101-4650-a9b5-af050055f631" containerName="barbican-db-sync" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.888762 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a7ff6dd-4101-4650-a9b5-af050055f631" containerName="barbican-db-sync" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.888957 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a7ff6dd-4101-4650-a9b5-af050055f631" containerName="barbican-db-sync" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.895545 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-99698bc47-f5twk" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.899085 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.899601 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.900178 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-nknn7" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.906678 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-99698bc47-f5twk"] Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.913362 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-5985b9fc68-gt5hx"] Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.914653 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.920401 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.940164 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5985b9fc68-gt5hx"] Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.994379 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-65c688cf8c-8nrkz"] Dec 06 05:46:56 crc kubenswrapper[4706]: I1206 05:46:56.995751 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.003790 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-65c688cf8c-8nrkz"] Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.013680 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-strlc\" (UniqueName: \"kubernetes.io/projected/7ea78fc3-49cb-46cb-a450-c3c0990135fb-kube-api-access-strlc\") pod \"barbican-worker-99698bc47-f5twk\" (UID: \"7ea78fc3-49cb-46cb-a450-c3c0990135fb\") " pod="openstack/barbican-worker-99698bc47-f5twk" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.013736 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99fd71cd-f273-4e5f-91e1-2816f523b9ce-logs\") pod \"barbican-keystone-listener-5985b9fc68-gt5hx\" (UID: \"99fd71cd-f273-4e5f-91e1-2816f523b9ce\") " pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.013769 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ea78fc3-49cb-46cb-a450-c3c0990135fb-logs\") pod \"barbican-worker-99698bc47-f5twk\" (UID: \"7ea78fc3-49cb-46cb-a450-c3c0990135fb\") " pod="openstack/barbican-worker-99698bc47-f5twk" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.013799 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbjqr\" (UniqueName: \"kubernetes.io/projected/99fd71cd-f273-4e5f-91e1-2816f523b9ce-kube-api-access-jbjqr\") pod \"barbican-keystone-listener-5985b9fc68-gt5hx\" (UID: \"99fd71cd-f273-4e5f-91e1-2816f523b9ce\") " pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.013822 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99fd71cd-f273-4e5f-91e1-2816f523b9ce-combined-ca-bundle\") pod \"barbican-keystone-listener-5985b9fc68-gt5hx\" (UID: \"99fd71cd-f273-4e5f-91e1-2816f523b9ce\") " pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.013849 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ea78fc3-49cb-46cb-a450-c3c0990135fb-config-data\") pod \"barbican-worker-99698bc47-f5twk\" (UID: \"7ea78fc3-49cb-46cb-a450-c3c0990135fb\") " pod="openstack/barbican-worker-99698bc47-f5twk" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 
05:46:57.013869 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99fd71cd-f273-4e5f-91e1-2816f523b9ce-config-data\") pod \"barbican-keystone-listener-5985b9fc68-gt5hx\" (UID: \"99fd71cd-f273-4e5f-91e1-2816f523b9ce\") " pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.013887 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7ea78fc3-49cb-46cb-a450-c3c0990135fb-config-data-custom\") pod \"barbican-worker-99698bc47-f5twk\" (UID: \"7ea78fc3-49cb-46cb-a450-c3c0990135fb\") " pod="openstack/barbican-worker-99698bc47-f5twk" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.013903 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ea78fc3-49cb-46cb-a450-c3c0990135fb-combined-ca-bundle\") pod \"barbican-worker-99698bc47-f5twk\" (UID: \"7ea78fc3-49cb-46cb-a450-c3c0990135fb\") " pod="openstack/barbican-worker-99698bc47-f5twk" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.013928 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/99fd71cd-f273-4e5f-91e1-2816f523b9ce-config-data-custom\") pod \"barbican-keystone-listener-5985b9fc68-gt5hx\" (UID: \"99fd71cd-f273-4e5f-91e1-2816f523b9ce\") " pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.119143 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7ea78fc3-49cb-46cb-a450-c3c0990135fb-config-data-custom\") pod \"barbican-worker-99698bc47-f5twk\" (UID: \"7ea78fc3-49cb-46cb-a450-c3c0990135fb\") " pod="openstack/barbican-worker-99698bc47-f5twk" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.119199 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzf45\" (UniqueName: \"kubernetes.io/projected/e5ec6ece-bd35-490c-be82-2b5506a0ca73-kube-api-access-xzf45\") pod \"dnsmasq-dns-65c688cf8c-8nrkz\" (UID: \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.119234 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ea78fc3-49cb-46cb-a450-c3c0990135fb-combined-ca-bundle\") pod \"barbican-worker-99698bc47-f5twk\" (UID: \"7ea78fc3-49cb-46cb-a450-c3c0990135fb\") " pod="openstack/barbican-worker-99698bc47-f5twk" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.119291 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-ovsdbserver-sb\") pod \"dnsmasq-dns-65c688cf8c-8nrkz\" (UID: \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.119354 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/99fd71cd-f273-4e5f-91e1-2816f523b9ce-config-data-custom\") 
pod \"barbican-keystone-listener-5985b9fc68-gt5hx\" (UID: \"99fd71cd-f273-4e5f-91e1-2816f523b9ce\") " pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.119528 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-strlc\" (UniqueName: \"kubernetes.io/projected/7ea78fc3-49cb-46cb-a450-c3c0990135fb-kube-api-access-strlc\") pod \"barbican-worker-99698bc47-f5twk\" (UID: \"7ea78fc3-49cb-46cb-a450-c3c0990135fb\") " pod="openstack/barbican-worker-99698bc47-f5twk" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.119565 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-dns-svc\") pod \"dnsmasq-dns-65c688cf8c-8nrkz\" (UID: \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.119596 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99fd71cd-f273-4e5f-91e1-2816f523b9ce-logs\") pod \"barbican-keystone-listener-5985b9fc68-gt5hx\" (UID: \"99fd71cd-f273-4e5f-91e1-2816f523b9ce\") " pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.119647 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ea78fc3-49cb-46cb-a450-c3c0990135fb-logs\") pod \"barbican-worker-99698bc47-f5twk\" (UID: \"7ea78fc3-49cb-46cb-a450-c3c0990135fb\") " pod="openstack/barbican-worker-99698bc47-f5twk" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.119688 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-config\") pod \"dnsmasq-dns-65c688cf8c-8nrkz\" (UID: \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.119712 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-ovsdbserver-nb\") pod \"dnsmasq-dns-65c688cf8c-8nrkz\" (UID: \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.119742 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbjqr\" (UniqueName: \"kubernetes.io/projected/99fd71cd-f273-4e5f-91e1-2816f523b9ce-kube-api-access-jbjqr\") pod \"barbican-keystone-listener-5985b9fc68-gt5hx\" (UID: \"99fd71cd-f273-4e5f-91e1-2816f523b9ce\") " pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.119776 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99fd71cd-f273-4e5f-91e1-2816f523b9ce-combined-ca-bundle\") pod \"barbican-keystone-listener-5985b9fc68-gt5hx\" (UID: \"99fd71cd-f273-4e5f-91e1-2816f523b9ce\") " pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.119831 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/7ea78fc3-49cb-46cb-a450-c3c0990135fb-config-data\") pod \"barbican-worker-99698bc47-f5twk\" (UID: \"7ea78fc3-49cb-46cb-a450-c3c0990135fb\") " pod="openstack/barbican-worker-99698bc47-f5twk" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.119857 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99fd71cd-f273-4e5f-91e1-2816f523b9ce-config-data\") pod \"barbican-keystone-listener-5985b9fc68-gt5hx\" (UID: \"99fd71cd-f273-4e5f-91e1-2816f523b9ce\") " pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.129885 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/99fd71cd-f273-4e5f-91e1-2816f523b9ce-logs\") pod \"barbican-keystone-listener-5985b9fc68-gt5hx\" (UID: \"99fd71cd-f273-4e5f-91e1-2816f523b9ce\") " pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.130256 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7ea78fc3-49cb-46cb-a450-c3c0990135fb-logs\") pod \"barbican-worker-99698bc47-f5twk\" (UID: \"7ea78fc3-49cb-46cb-a450-c3c0990135fb\") " pod="openstack/barbican-worker-99698bc47-f5twk" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.136241 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/99fd71cd-f273-4e5f-91e1-2816f523b9ce-config-data-custom\") pod \"barbican-keystone-listener-5985b9fc68-gt5hx\" (UID: \"99fd71cd-f273-4e5f-91e1-2816f523b9ce\") " pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.138684 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5d9ccff6c8-kmw5z"] Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.140127 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.142542 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.143671 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/99fd71cd-f273-4e5f-91e1-2816f523b9ce-config-data\") pod \"barbican-keystone-listener-5985b9fc68-gt5hx\" (UID: \"99fd71cd-f273-4e5f-91e1-2816f523b9ce\") " pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.146875 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7ea78fc3-49cb-46cb-a450-c3c0990135fb-config-data\") pod \"barbican-worker-99698bc47-f5twk\" (UID: \"7ea78fc3-49cb-46cb-a450-c3c0990135fb\") " pod="openstack/barbican-worker-99698bc47-f5twk" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.147472 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7ea78fc3-49cb-46cb-a450-c3c0990135fb-config-data-custom\") pod \"barbican-worker-99698bc47-f5twk\" (UID: \"7ea78fc3-49cb-46cb-a450-c3c0990135fb\") " pod="openstack/barbican-worker-99698bc47-f5twk" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.155531 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/99fd71cd-f273-4e5f-91e1-2816f523b9ce-combined-ca-bundle\") pod \"barbican-keystone-listener-5985b9fc68-gt5hx\" (UID: \"99fd71cd-f273-4e5f-91e1-2816f523b9ce\") " pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.156037 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7ea78fc3-49cb-46cb-a450-c3c0990135fb-combined-ca-bundle\") pod \"barbican-worker-99698bc47-f5twk\" (UID: \"7ea78fc3-49cb-46cb-a450-c3c0990135fb\") " pod="openstack/barbican-worker-99698bc47-f5twk" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.162975 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5d9ccff6c8-kmw5z"] Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.163519 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-strlc\" (UniqueName: \"kubernetes.io/projected/7ea78fc3-49cb-46cb-a450-c3c0990135fb-kube-api-access-strlc\") pod \"barbican-worker-99698bc47-f5twk\" (UID: \"7ea78fc3-49cb-46cb-a450-c3c0990135fb\") " pod="openstack/barbican-worker-99698bc47-f5twk" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.167643 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbjqr\" (UniqueName: \"kubernetes.io/projected/99fd71cd-f273-4e5f-91e1-2816f523b9ce-kube-api-access-jbjqr\") pod \"barbican-keystone-listener-5985b9fc68-gt5hx\" (UID: \"99fd71cd-f273-4e5f-91e1-2816f523b9ce\") " pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.215449 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-99698bc47-f5twk" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.221909 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-combined-ca-bundle\") pod \"barbican-api-5d9ccff6c8-kmw5z\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") " pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.221961 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpxf4\" (UniqueName: \"kubernetes.io/projected/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-kube-api-access-mpxf4\") pod \"barbican-api-5d9ccff6c8-kmw5z\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") " pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.221984 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-dns-svc\") pod \"dnsmasq-dns-65c688cf8c-8nrkz\" (UID: \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.222007 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-config-data\") pod \"barbican-api-5d9ccff6c8-kmw5z\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") " pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.222073 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-config\") pod \"dnsmasq-dns-65c688cf8c-8nrkz\" (UID: \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.222094 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-ovsdbserver-nb\") pod \"dnsmasq-dns-65c688cf8c-8nrkz\" (UID: \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.222122 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-logs\") pod \"barbican-api-5d9ccff6c8-kmw5z\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") " pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.222152 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzf45\" (UniqueName: \"kubernetes.io/projected/e5ec6ece-bd35-490c-be82-2b5506a0ca73-kube-api-access-xzf45\") pod \"dnsmasq-dns-65c688cf8c-8nrkz\" (UID: \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.222175 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-ovsdbserver-sb\") pod \"dnsmasq-dns-65c688cf8c-8nrkz\" (UID: 
\"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.222209 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-config-data-custom\") pod \"barbican-api-5d9ccff6c8-kmw5z\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") " pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.222952 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-dns-svc\") pod \"dnsmasq-dns-65c688cf8c-8nrkz\" (UID: \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.223446 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-config\") pod \"dnsmasq-dns-65c688cf8c-8nrkz\" (UID: \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.223900 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-ovsdbserver-nb\") pod \"dnsmasq-dns-65c688cf8c-8nrkz\" (UID: \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.224702 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-ovsdbserver-sb\") pod \"dnsmasq-dns-65c688cf8c-8nrkz\" (UID: \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.232714 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.269346 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzf45\" (UniqueName: \"kubernetes.io/projected/e5ec6ece-bd35-490c-be82-2b5506a0ca73-kube-api-access-xzf45\") pod \"dnsmasq-dns-65c688cf8c-8nrkz\" (UID: \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.321744 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.324781 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpxf4\" (UniqueName: \"kubernetes.io/projected/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-kube-api-access-mpxf4\") pod \"barbican-api-5d9ccff6c8-kmw5z\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") " pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.324819 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-config-data\") pod \"barbican-api-5d9ccff6c8-kmw5z\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") " pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.324884 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-logs\") pod \"barbican-api-5d9ccff6c8-kmw5z\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") " pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.324940 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-config-data-custom\") pod \"barbican-api-5d9ccff6c8-kmw5z\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") " pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.324979 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-combined-ca-bundle\") pod \"barbican-api-5d9ccff6c8-kmw5z\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") " pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.337800 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-config-data\") pod \"barbican-api-5d9ccff6c8-kmw5z\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") " pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.342638 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-config-data-custom\") pod \"barbican-api-5d9ccff6c8-kmw5z\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") " pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.346614 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-combined-ca-bundle\") pod \"barbican-api-5d9ccff6c8-kmw5z\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") " pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.349188 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-logs\") pod \"barbican-api-5d9ccff6c8-kmw5z\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") " pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:46:57 crc 
kubenswrapper[4706]: I1206 05:46:57.355806 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpxf4\" (UniqueName: \"kubernetes.io/projected/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-kube-api-access-mpxf4\") pod \"barbican-api-5d9ccff6c8-kmw5z\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") " pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.454469 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.812933 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-99698bc47-f5twk"] Dec 06 05:46:57 crc kubenswrapper[4706]: W1206 05:46:57.824582 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7ea78fc3_49cb_46cb_a450_c3c0990135fb.slice/crio-d4d9b909f59230fc0f0f021d7c8dbab21bf47e627edfbf2ff91eb1a6be4e67a1 WatchSource:0}: Error finding container d4d9b909f59230fc0f0f021d7c8dbab21bf47e627edfbf2ff91eb1a6be4e67a1: Status 404 returned error can't find the container with id d4d9b909f59230fc0f0f021d7c8dbab21bf47e627edfbf2ff91eb1a6be4e67a1 Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.980973 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-5985b9fc68-gt5hx"] Dec 06 05:46:57 crc kubenswrapper[4706]: I1206 05:46:57.995405 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-65c688cf8c-8nrkz"] Dec 06 05:46:58 crc kubenswrapper[4706]: I1206 05:46:58.177439 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5d9ccff6c8-kmw5z"] Dec 06 05:46:58 crc kubenswrapper[4706]: W1206 05:46:58.180608 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod687c17f9_1a75_4b68_8d97_1285ad8f9e3d.slice/crio-b9cc647db6f664a9ccb5e207dea2d75eaa1d359175b77700488d27e41f545fcc WatchSource:0}: Error finding container b9cc647db6f664a9ccb5e207dea2d75eaa1d359175b77700488d27e41f545fcc: Status 404 returned error can't find the container with id b9cc647db6f664a9ccb5e207dea2d75eaa1d359175b77700488d27e41f545fcc Dec 06 05:46:58 crc kubenswrapper[4706]: I1206 05:46:58.768694 4706 generic.go:334] "Generic (PLEG): container finished" podID="e5ec6ece-bd35-490c-be82-2b5506a0ca73" containerID="491e9ce4e4eede7a03ecfda6d57b00eb9713ef18ba5aee1a96059b803fcc537c" exitCode=0 Dec 06 05:46:58 crc kubenswrapper[4706]: I1206 05:46:58.768737 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" event={"ID":"e5ec6ece-bd35-490c-be82-2b5506a0ca73","Type":"ContainerDied","Data":"491e9ce4e4eede7a03ecfda6d57b00eb9713ef18ba5aee1a96059b803fcc537c"} Dec 06 05:46:58 crc kubenswrapper[4706]: I1206 05:46:58.769143 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" event={"ID":"e5ec6ece-bd35-490c-be82-2b5506a0ca73","Type":"ContainerStarted","Data":"7a36310c595e6db14be7f830e93160bc72aaf8f15ead6830c0239d96a2ecd1fe"} Dec 06 05:46:58 crc kubenswrapper[4706]: I1206 05:46:58.773157 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" event={"ID":"687c17f9-1a75-4b68-8d97-1285ad8f9e3d","Type":"ContainerStarted","Data":"26ad472cbd33633f7af1b4c342cc43095dec2824c00ca1a73fbc5510bcad3d5c"} Dec 06 05:46:58 crc 
kubenswrapper[4706]: I1206 05:46:58.773184 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" event={"ID":"687c17f9-1a75-4b68-8d97-1285ad8f9e3d","Type":"ContainerStarted","Data":"b9cc647db6f664a9ccb5e207dea2d75eaa1d359175b77700488d27e41f545fcc"} Dec 06 05:46:58 crc kubenswrapper[4706]: I1206 05:46:58.807872 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"91f74906-ec70-4b0c-a657-d075d18f488b","Type":"ContainerStarted","Data":"aff045c9cb00725a6cf597661b0c60dc6a22dd448de240ee29fae77e10ff228c"} Dec 06 05:46:58 crc kubenswrapper[4706]: I1206 05:46:58.807915 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"91f74906-ec70-4b0c-a657-d075d18f488b","Type":"ContainerStarted","Data":"dee4ce17d35982eca09b6e8ab804b4bc9faf5f27203a243036652b4378fd2e12"} Dec 06 05:46:58 crc kubenswrapper[4706]: I1206 05:46:58.807925 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"91f74906-ec70-4b0c-a657-d075d18f488b","Type":"ContainerStarted","Data":"3f06b7118210b0baa4c9c655ac6024f8226f78c60ea8dcbba57f47ab8282b8dc"} Dec 06 05:46:58 crc kubenswrapper[4706]: I1206 05:46:58.807935 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"91f74906-ec70-4b0c-a657-d075d18f488b","Type":"ContainerStarted","Data":"0a1f0937fcc6133bdfa2e2fd174f9c711e69e7e6bf6e4c2bd96388332a4dc11b"} Dec 06 05:46:58 crc kubenswrapper[4706]: I1206 05:46:58.818470 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" event={"ID":"99fd71cd-f273-4e5f-91e1-2816f523b9ce","Type":"ContainerStarted","Data":"be4dcec75290dc5d502948f43b8c53564dee57807cd9f94b0f1069fd5a5d3a12"} Dec 06 05:46:58 crc kubenswrapper[4706]: I1206 05:46:58.820881 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-99698bc47-f5twk" event={"ID":"7ea78fc3-49cb-46cb-a450-c3c0990135fb","Type":"ContainerStarted","Data":"d4d9b909f59230fc0f0f021d7c8dbab21bf47e627edfbf2ff91eb1a6be4e67a1"} Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.764935 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5d57ffb9bb-t86s7"] Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.766949 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.768971 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.771705 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.785738 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5d57ffb9bb-t86s7"] Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.887548 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb031ade-7dae-40f8-a748-8842d00f6a37-config-data\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.887590 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fb031ade-7dae-40f8-a748-8842d00f6a37-config-data-custom\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.888172 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb031ade-7dae-40f8-a748-8842d00f6a37-combined-ca-bundle\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.888204 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxqht\" (UniqueName: \"kubernetes.io/projected/fb031ade-7dae-40f8-a748-8842d00f6a37-kube-api-access-bxqht\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.888258 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb031ade-7dae-40f8-a748-8842d00f6a37-public-tls-certs\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.888300 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb031ade-7dae-40f8-a748-8842d00f6a37-logs\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.888330 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb031ade-7dae-40f8-a748-8842d00f6a37-internal-tls-certs\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.990311 4706 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb031ade-7dae-40f8-a748-8842d00f6a37-config-data\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.990367 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fb031ade-7dae-40f8-a748-8842d00f6a37-config-data-custom\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.990479 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb031ade-7dae-40f8-a748-8842d00f6a37-combined-ca-bundle\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.990534 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxqht\" (UniqueName: \"kubernetes.io/projected/fb031ade-7dae-40f8-a748-8842d00f6a37-kube-api-access-bxqht\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.990553 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb031ade-7dae-40f8-a748-8842d00f6a37-public-tls-certs\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.990573 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb031ade-7dae-40f8-a748-8842d00f6a37-logs\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.990599 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb031ade-7dae-40f8-a748-8842d00f6a37-internal-tls-certs\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.991181 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fb031ade-7dae-40f8-a748-8842d00f6a37-logs\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.996540 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb031ade-7dae-40f8-a748-8842d00f6a37-combined-ca-bundle\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.996761 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/fb031ade-7dae-40f8-a748-8842d00f6a37-public-tls-certs\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:46:59 crc kubenswrapper[4706]: I1206 05:46:59.997292 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fb031ade-7dae-40f8-a748-8842d00f6a37-config-data-custom\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.000025 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fb031ade-7dae-40f8-a748-8842d00f6a37-config-data\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.010519 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxqht\" (UniqueName: \"kubernetes.io/projected/fb031ade-7dae-40f8-a748-8842d00f6a37-kube-api-access-bxqht\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.011346 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb031ade-7dae-40f8-a748-8842d00f6a37-internal-tls-certs\") pod \"barbican-api-5d57ffb9bb-t86s7\" (UID: \"fb031ade-7dae-40f8-a748-8842d00f6a37\") " pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.180253 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=37.113562096 podStartE2EDuration="2m25.180236206s" podCreationTimestamp="2025-12-06 05:44:35 +0000 UTC" firstStartedPulling="2025-12-06 05:45:09.521780873 +0000 UTC m=+1531.849604817" lastFinishedPulling="2025-12-06 05:46:57.588454983 +0000 UTC m=+1639.916278927" observedRunningTime="2025-12-06 05:46:59.932153064 +0000 UTC m=+1642.259977018" watchObservedRunningTime="2025-12-06 05:47:00.180236206 +0000 UTC m=+1642.508060150" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.181655 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.183828 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-65c688cf8c-8nrkz"] Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.217922 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7979dc8455-v2sfb"] Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.219736 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.228166 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.236969 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7979dc8455-v2sfb"] Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.310741 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-ovsdbserver-nb\") pod \"dnsmasq-dns-7979dc8455-v2sfb\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") " pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.310924 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-config\") pod \"dnsmasq-dns-7979dc8455-v2sfb\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") " pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.310973 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-dns-swift-storage-0\") pod \"dnsmasq-dns-7979dc8455-v2sfb\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") " pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.311033 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-ovsdbserver-sb\") pod \"dnsmasq-dns-7979dc8455-v2sfb\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") " pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.311223 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4hpzw\" (UniqueName: \"kubernetes.io/projected/c6cd9b30-8090-48ed-9c45-bca903c380ee-kube-api-access-4hpzw\") pod \"dnsmasq-dns-7979dc8455-v2sfb\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") " pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.311383 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-dns-svc\") pod \"dnsmasq-dns-7979dc8455-v2sfb\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") " pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.413376 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-dns-swift-storage-0\") pod \"dnsmasq-dns-7979dc8455-v2sfb\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") " pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.413442 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-ovsdbserver-sb\") pod \"dnsmasq-dns-7979dc8455-v2sfb\" (UID: 
\"c6cd9b30-8090-48ed-9c45-bca903c380ee\") " pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.413523 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4hpzw\" (UniqueName: \"kubernetes.io/projected/c6cd9b30-8090-48ed-9c45-bca903c380ee-kube-api-access-4hpzw\") pod \"dnsmasq-dns-7979dc8455-v2sfb\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") " pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.413560 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-dns-svc\") pod \"dnsmasq-dns-7979dc8455-v2sfb\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") " pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.413598 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-ovsdbserver-nb\") pod \"dnsmasq-dns-7979dc8455-v2sfb\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") " pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.413647 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-config\") pod \"dnsmasq-dns-7979dc8455-v2sfb\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") " pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.414485 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-config\") pod \"dnsmasq-dns-7979dc8455-v2sfb\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") " pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.415759 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-dns-swift-storage-0\") pod \"dnsmasq-dns-7979dc8455-v2sfb\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") " pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.416279 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-ovsdbserver-sb\") pod \"dnsmasq-dns-7979dc8455-v2sfb\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") " pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.417133 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-dns-svc\") pod \"dnsmasq-dns-7979dc8455-v2sfb\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") " pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.417590 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-ovsdbserver-nb\") pod \"dnsmasq-dns-7979dc8455-v2sfb\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") " pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: 
I1206 05:47:00.432660 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4hpzw\" (UniqueName: \"kubernetes.io/projected/c6cd9b30-8090-48ed-9c45-bca903c380ee-kube-api-access-4hpzw\") pod \"dnsmasq-dns-7979dc8455-v2sfb\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") " pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.538588 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.899653 4706 generic.go:334] "Generic (PLEG): container finished" podID="b78581a5-4314-4209-967f-715fa91ee6a7" containerID="3e418495c0765bf929d12275fccecfa54d63852330f9665591cf76176cc4eaab" exitCode=0 Dec 06 05:47:00 crc kubenswrapper[4706]: I1206 05:47:00.899699 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b78581a5-4314-4209-967f-715fa91ee6a7","Type":"ContainerDied","Data":"3e418495c0765bf929d12275fccecfa54d63852330f9665591cf76176cc4eaab"} Dec 06 05:47:01 crc kubenswrapper[4706]: I1206 05:47:01.212113 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:47:01 crc kubenswrapper[4706]: I1206 05:47:01.361646 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:47:01 crc kubenswrapper[4706]: I1206 05:47:01.914475 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" event={"ID":"e5ec6ece-bd35-490c-be82-2b5506a0ca73","Type":"ContainerStarted","Data":"dbcb129e6fbe3a5494846b32fbca9045a751463d6f56aa374759258c8d825204"} Dec 06 05:47:01 crc kubenswrapper[4706]: I1206 05:47:01.914832 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" podUID="e5ec6ece-bd35-490c-be82-2b5506a0ca73" containerName="dnsmasq-dns" containerID="cri-o://dbcb129e6fbe3a5494846b32fbca9045a751463d6f56aa374759258c8d825204" gracePeriod=10 Dec 06 05:47:01 crc kubenswrapper[4706]: I1206 05:47:01.914901 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:47:01 crc kubenswrapper[4706]: I1206 05:47:01.925891 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" event={"ID":"687c17f9-1a75-4b68-8d97-1285ad8f9e3d","Type":"ContainerStarted","Data":"002b48091c527ced03a528aab54bc6329b702948d4609b828f02ee3aaa0d2e01"} Dec 06 05:47:01 crc kubenswrapper[4706]: I1206 05:47:01.926967 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:47:01 crc kubenswrapper[4706]: I1206 05:47:01.926997 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:47:01 crc kubenswrapper[4706]: I1206 05:47:01.966871 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" podStartSLOduration=5.966854169 podStartE2EDuration="5.966854169s" podCreationTimestamp="2025-12-06 05:46:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:01.964308621 +0000 UTC m=+1644.292132585" watchObservedRunningTime="2025-12-06 05:47:01.966854169 +0000 UTC m=+1644.294678113" Dec 06 05:47:01 
crc kubenswrapper[4706]: I1206 05:47:01.994627 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podStartSLOduration=4.994611309 podStartE2EDuration="4.994611309s" podCreationTimestamp="2025-12-06 05:46:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:01.993742465 +0000 UTC m=+1644.321566419" watchObservedRunningTime="2025-12-06 05:47:01.994611309 +0000 UTC m=+1644.322435253" Dec 06 05:47:02 crc kubenswrapper[4706]: I1206 05:47:02.377979 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7979dc8455-v2sfb"] Dec 06 05:47:02 crc kubenswrapper[4706]: W1206 05:47:02.513531 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc6cd9b30_8090_48ed_9c45_bca903c380ee.slice/crio-2805bc2829a6baf80e9716c9962a9741b8f536bd6a50339f4af0dec45e48e405 WatchSource:0}: Error finding container 2805bc2829a6baf80e9716c9962a9741b8f536bd6a50339f4af0dec45e48e405: Status 404 returned error can't find the container with id 2805bc2829a6baf80e9716c9962a9741b8f536bd6a50339f4af0dec45e48e405 Dec 06 05:47:02 crc kubenswrapper[4706]: I1206 05:47:02.584761 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5d57ffb9bb-t86s7"] Dec 06 05:47:02 crc kubenswrapper[4706]: I1206 05:47:02.938431 4706 generic.go:334] "Generic (PLEG): container finished" podID="e5ec6ece-bd35-490c-be82-2b5506a0ca73" containerID="dbcb129e6fbe3a5494846b32fbca9045a751463d6f56aa374759258c8d825204" exitCode=0 Dec 06 05:47:02 crc kubenswrapper[4706]: I1206 05:47:02.938490 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" event={"ID":"e5ec6ece-bd35-490c-be82-2b5506a0ca73","Type":"ContainerDied","Data":"dbcb129e6fbe3a5494846b32fbca9045a751463d6f56aa374759258c8d825204"} Dec 06 05:47:02 crc kubenswrapper[4706]: I1206 05:47:02.941289 4706 generic.go:334] "Generic (PLEG): container finished" podID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerID="002b48091c527ced03a528aab54bc6329b702948d4609b828f02ee3aaa0d2e01" exitCode=1 Dec 06 05:47:02 crc kubenswrapper[4706]: I1206 05:47:02.941343 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" event={"ID":"687c17f9-1a75-4b68-8d97-1285ad8f9e3d","Type":"ContainerDied","Data":"002b48091c527ced03a528aab54bc6329b702948d4609b828f02ee3aaa0d2e01"} Dec 06 05:47:02 crc kubenswrapper[4706]: I1206 05:47:02.941919 4706 scope.go:117] "RemoveContainer" containerID="002b48091c527ced03a528aab54bc6329b702948d4609b828f02ee3aaa0d2e01" Dec 06 05:47:02 crc kubenswrapper[4706]: I1206 05:47:02.944712 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" event={"ID":"c6cd9b30-8090-48ed-9c45-bca903c380ee","Type":"ContainerStarted","Data":"2805bc2829a6baf80e9716c9962a9741b8f536bd6a50339f4af0dec45e48e405"} Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.168676 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.182950 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.221564 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.292978 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-8f474c4b8-xgvj4" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.293914 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-ovsdbserver-sb\") pod \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\" (UID: \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.294065 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-scripts\") pod \"b78581a5-4314-4209-967f-715fa91ee6a7\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.294120 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-config\") pod \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\" (UID: \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.294190 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-combined-ca-bundle\") pod \"b78581a5-4314-4209-967f-715fa91ee6a7\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.294222 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b78581a5-4314-4209-967f-715fa91ee6a7-run-httpd\") pod \"b78581a5-4314-4209-967f-715fa91ee6a7\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.294277 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-dns-svc\") pod \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\" (UID: \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.294316 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-ovsdbserver-nb\") pod \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\" (UID: \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.294346 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-sg-core-conf-yaml\") pod \"b78581a5-4314-4209-967f-715fa91ee6a7\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.294383 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j5sgs\" (UniqueName: \"kubernetes.io/projected/b78581a5-4314-4209-967f-715fa91ee6a7-kube-api-access-j5sgs\") pod \"b78581a5-4314-4209-967f-715fa91ee6a7\" (UID: 
\"b78581a5-4314-4209-967f-715fa91ee6a7\") " Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.294429 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xzf45\" (UniqueName: \"kubernetes.io/projected/e5ec6ece-bd35-490c-be82-2b5506a0ca73-kube-api-access-xzf45\") pod \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\" (UID: \"e5ec6ece-bd35-490c-be82-2b5506a0ca73\") " Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.294473 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-config-data\") pod \"b78581a5-4314-4209-967f-715fa91ee6a7\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.294509 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b78581a5-4314-4209-967f-715fa91ee6a7-log-httpd\") pod \"b78581a5-4314-4209-967f-715fa91ee6a7\" (UID: \"b78581a5-4314-4209-967f-715fa91ee6a7\") " Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.298142 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b78581a5-4314-4209-967f-715fa91ee6a7-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b78581a5-4314-4209-967f-715fa91ee6a7" (UID: "b78581a5-4314-4209-967f-715fa91ee6a7"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.299444 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b78581a5-4314-4209-967f-715fa91ee6a7-kube-api-access-j5sgs" (OuterVolumeSpecName: "kube-api-access-j5sgs") pod "b78581a5-4314-4209-967f-715fa91ee6a7" (UID: "b78581a5-4314-4209-967f-715fa91ee6a7"). InnerVolumeSpecName "kube-api-access-j5sgs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.301791 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5ec6ece-bd35-490c-be82-2b5506a0ca73-kube-api-access-xzf45" (OuterVolumeSpecName: "kube-api-access-xzf45") pod "e5ec6ece-bd35-490c-be82-2b5506a0ca73" (UID: "e5ec6ece-bd35-490c-be82-2b5506a0ca73"). InnerVolumeSpecName "kube-api-access-xzf45". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.302116 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b78581a5-4314-4209-967f-715fa91ee6a7-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b78581a5-4314-4209-967f-715fa91ee6a7" (UID: "b78581a5-4314-4209-967f-715fa91ee6a7"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.337371 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-scripts" (OuterVolumeSpecName: "scripts") pod "b78581a5-4314-4209-967f-715fa91ee6a7" (UID: "b78581a5-4314-4209-967f-715fa91ee6a7"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.397770 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j5sgs\" (UniqueName: \"kubernetes.io/projected/b78581a5-4314-4209-967f-715fa91ee6a7-kube-api-access-j5sgs\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.397796 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xzf45\" (UniqueName: \"kubernetes.io/projected/e5ec6ece-bd35-490c-be82-2b5506a0ca73-kube-api-access-xzf45\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.397806 4706 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b78581a5-4314-4209-967f-715fa91ee6a7-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.397816 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.397827 4706 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b78581a5-4314-4209-967f-715fa91ee6a7-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.404859 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7f979b84f6-hzq85"] Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.431337 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-config" (OuterVolumeSpecName: "config") pod "e5ec6ece-bd35-490c-be82-2b5506a0ca73" (UID: "e5ec6ece-bd35-490c-be82-2b5506a0ca73"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.454972 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.457796 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.149:9311/healthcheck\": dial tcp 10.217.0.149:9311: connect: connection refused" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.482522 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e5ec6ece-bd35-490c-be82-2b5506a0ca73" (UID: "e5ec6ece-bd35-490c-be82-2b5506a0ca73"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.499383 4706 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.499415 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.512085 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b78581a5-4314-4209-967f-715fa91ee6a7" (UID: "b78581a5-4314-4209-967f-715fa91ee6a7"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.602135 4706 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.679861 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e5ec6ece-bd35-490c-be82-2b5506a0ca73" (UID: "e5ec6ece-bd35-490c-be82-2b5506a0ca73"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.704124 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.711157 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b78581a5-4314-4209-967f-715fa91ee6a7" (UID: "b78581a5-4314-4209-967f-715fa91ee6a7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.755763 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e5ec6ece-bd35-490c-be82-2b5506a0ca73" (UID: "e5ec6ece-bd35-490c-be82-2b5506a0ca73"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.794257 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-config-data" (OuterVolumeSpecName: "config-data") pod "b78581a5-4314-4209-967f-715fa91ee6a7" (UID: "b78581a5-4314-4209-967f-715fa91ee6a7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.805966 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.806006 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b78581a5-4314-4209-967f-715fa91ee6a7-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.806017 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e5ec6ece-bd35-490c-be82-2b5506a0ca73-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.957271 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.957272 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b78581a5-4314-4209-967f-715fa91ee6a7","Type":"ContainerDied","Data":"c4b2c8919f7ec895a03cabd990fdd4b8e679c7a4dd8ad2ef8bbc3003a0e74d6d"} Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.957413 4706 scope.go:117] "RemoveContainer" containerID="3b3c7e3507ff1c7df96a4d07363401332810964ed37e227589edec0ce08801e4" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.959132 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-99698bc47-f5twk" event={"ID":"7ea78fc3-49cb-46cb-a450-c3c0990135fb","Type":"ContainerStarted","Data":"d1e2eb885de8d6a03e1bc3266c47d43aac57469356fa16a2c8f810e4d213fe91"} Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.962363 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5d57ffb9bb-t86s7" event={"ID":"fb031ade-7dae-40f8-a748-8842d00f6a37","Type":"ContainerStarted","Data":"4b5bb82fea0bb31544e563fc1e0f81a5996aae4178f2e6653593b25b00955844"} Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.962394 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5d57ffb9bb-t86s7" event={"ID":"fb031ade-7dae-40f8-a748-8842d00f6a37","Type":"ContainerStarted","Data":"96d80a397dfbb209c864ab929286bfa687b468855bd18d00a329f48720e4ac7d"} Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.962409 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5d57ffb9bb-t86s7" event={"ID":"fb031ade-7dae-40f8-a748-8842d00f6a37","Type":"ContainerStarted","Data":"e733713c3994537e52001bd8b5f6b49bc365396fdcbbd81b9138fca900b3c7a5"} Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.962716 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.962744 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5d57ffb9bb-t86s7" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.966739 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.966741 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-65c688cf8c-8nrkz" event={"ID":"e5ec6ece-bd35-490c-be82-2b5506a0ca73","Type":"ContainerDied","Data":"7a36310c595e6db14be7f830e93160bc72aaf8f15ead6830c0239d96a2ecd1fe"} Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.970608 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" event={"ID":"687c17f9-1a75-4b68-8d97-1285ad8f9e3d","Type":"ContainerStarted","Data":"5c008e2e346c97d8437ddcf7143a68076040812b5dc1bc770005c634192c2853"} Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.970948 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.971895 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.149:9311/healthcheck\": dial tcp 10.217.0.149:9311: connect: connection refused" Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.973117 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-mzfvf" event={"ID":"4f0ced3b-4b02-4ce1-935a-af7cc2e01346","Type":"ContainerStarted","Data":"0956ef6adc5ea811ae355357ff7be56603cafeab53c5ac9b353d08f2502893a2"} Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.983281 4706 generic.go:334] "Generic (PLEG): container finished" podID="c6cd9b30-8090-48ed-9c45-bca903c380ee" containerID="cd0c70c1440355130c89f7e9d6ac09b6d6f5d744a234851122b9e521f7dc6d95" exitCode=0 Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.983862 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" event={"ID":"c6cd9b30-8090-48ed-9c45-bca903c380ee","Type":"ContainerDied","Data":"cd0c70c1440355130c89f7e9d6ac09b6d6f5d744a234851122b9e521f7dc6d95"} Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.987969 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7f979b84f6-hzq85" podUID="ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" containerName="horizon-log" containerID="cri-o://5a6bfebf319d4eb3f9bbfd4b2c9f92dbf46c782a519f3895cca98b4760ce3f3b" gracePeriod=30 Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.989766 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" event={"ID":"99fd71cd-f273-4e5f-91e1-2816f523b9ce","Type":"ContainerStarted","Data":"dcdda3f339b2084fb3930296fc6b6f0c41a03e142418dee08b4bd97185c55e36"} Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.989808 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" event={"ID":"99fd71cd-f273-4e5f-91e1-2816f523b9ce","Type":"ContainerStarted","Data":"96d48664d18936a41b59ca0952d10b96d64ddcdb3ce6e5ad9f8a5e27a2d0e31b"} Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 05:47:03.989883 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-7f979b84f6-hzq85" podUID="ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" containerName="horizon" containerID="cri-o://4690ec4d5b8ee63e514a007ab5967eb43caa526a3ad17a05261a250f9e8efa62" gracePeriod=30 Dec 06 05:47:03 crc kubenswrapper[4706]: I1206 
05:47:03.992344 4706 scope.go:117] "RemoveContainer" containerID="65315d027723533d8bce7becb1dfb4a37263a5442e8bf46b7c6775f2cb032184" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.008411 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-mzfvf" podStartSLOduration=2.456256091 podStartE2EDuration="1m24.008387768s" podCreationTimestamp="2025-12-06 05:45:40 +0000 UTC" firstStartedPulling="2025-12-06 05:45:41.630350931 +0000 UTC m=+1563.958174875" lastFinishedPulling="2025-12-06 05:47:03.182482608 +0000 UTC m=+1645.510306552" observedRunningTime="2025-12-06 05:47:04.001831581 +0000 UTC m=+1646.329655525" watchObservedRunningTime="2025-12-06 05:47:04.008387768 +0000 UTC m=+1646.336211712" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.011147 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5d57ffb9bb-t86s7" podStartSLOduration=5.011139542 podStartE2EDuration="5.011139542s" podCreationTimestamp="2025-12-06 05:46:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:03.979316953 +0000 UTC m=+1646.307140917" watchObservedRunningTime="2025-12-06 05:47:04.011139542 +0000 UTC m=+1646.338963496" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.045302 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-5985b9fc68-gt5hx" podStartSLOduration=2.875507661 podStartE2EDuration="8.045280586s" podCreationTimestamp="2025-12-06 05:46:56 +0000 UTC" firstStartedPulling="2025-12-06 05:46:57.998635723 +0000 UTC m=+1640.326459667" lastFinishedPulling="2025-12-06 05:47:03.168408648 +0000 UTC m=+1645.496232592" observedRunningTime="2025-12-06 05:47:04.036393175 +0000 UTC m=+1646.364217139" watchObservedRunningTime="2025-12-06 05:47:04.045280586 +0000 UTC m=+1646.373104530" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.115134 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.141276 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.162144 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:47:04 crc kubenswrapper[4706]: E1206 05:47:04.172998 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b78581a5-4314-4209-967f-715fa91ee6a7" containerName="ceilometer-notification-agent" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.173032 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="b78581a5-4314-4209-967f-715fa91ee6a7" containerName="ceilometer-notification-agent" Dec 06 05:47:04 crc kubenswrapper[4706]: E1206 05:47:04.173065 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b78581a5-4314-4209-967f-715fa91ee6a7" containerName="proxy-httpd" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.173074 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="b78581a5-4314-4209-967f-715fa91ee6a7" containerName="proxy-httpd" Dec 06 05:47:04 crc kubenswrapper[4706]: E1206 05:47:04.173105 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5ec6ece-bd35-490c-be82-2b5506a0ca73" containerName="dnsmasq-dns" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.173155 4706 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="e5ec6ece-bd35-490c-be82-2b5506a0ca73" containerName="dnsmasq-dns" Dec 06 05:47:04 crc kubenswrapper[4706]: E1206 05:47:04.173170 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b78581a5-4314-4209-967f-715fa91ee6a7" containerName="sg-core" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.173177 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="b78581a5-4314-4209-967f-715fa91ee6a7" containerName="sg-core" Dec 06 05:47:04 crc kubenswrapper[4706]: E1206 05:47:04.173185 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5ec6ece-bd35-490c-be82-2b5506a0ca73" containerName="init" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.173190 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5ec6ece-bd35-490c-be82-2b5506a0ca73" containerName="init" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.173442 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="b78581a5-4314-4209-967f-715fa91ee6a7" containerName="proxy-httpd" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.173458 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="b78581a5-4314-4209-967f-715fa91ee6a7" containerName="ceilometer-notification-agent" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.173476 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5ec6ece-bd35-490c-be82-2b5506a0ca73" containerName="dnsmasq-dns" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.173492 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="b78581a5-4314-4209-967f-715fa91ee6a7" containerName="sg-core" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.180977 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.182163 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.185000 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.185000 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.246742 4706 scope.go:117] "RemoveContainer" containerID="3e418495c0765bf929d12275fccecfa54d63852330f9665591cf76176cc4eaab" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.254488 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-65c688cf8c-8nrkz"] Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.267197 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-65c688cf8c-8nrkz"] Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.321209 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.321332 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-config-data\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.321420 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.321507 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ss69s\" (UniqueName: \"kubernetes.io/projected/2df640a0-8f4d-4743-84f9-32a9e187d282-kube-api-access-ss69s\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.321736 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2df640a0-8f4d-4743-84f9-32a9e187d282-log-httpd\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.321777 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2df640a0-8f4d-4743-84f9-32a9e187d282-run-httpd\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.321864 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-scripts\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 
05:47:04.339969 4706 scope.go:117] "RemoveContainer" containerID="dbcb129e6fbe3a5494846b32fbca9045a751463d6f56aa374759258c8d825204" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.371099 4706 scope.go:117] "RemoveContainer" containerID="491e9ce4e4eede7a03ecfda6d57b00eb9713ef18ba5aee1a96059b803fcc537c" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.423380 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2df640a0-8f4d-4743-84f9-32a9e187d282-log-httpd\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.423428 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2df640a0-8f4d-4743-84f9-32a9e187d282-run-httpd\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.423473 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-scripts\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.423512 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.423596 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-config-data\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.423620 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.423661 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ss69s\" (UniqueName: \"kubernetes.io/projected/2df640a0-8f4d-4743-84f9-32a9e187d282-kube-api-access-ss69s\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.423976 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2df640a0-8f4d-4743-84f9-32a9e187d282-run-httpd\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.424207 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2df640a0-8f4d-4743-84f9-32a9e187d282-log-httpd\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.432753 4706 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.434749 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-scripts\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.436757 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.437781 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-config-data\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.454751 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ss69s\" (UniqueName: \"kubernetes.io/projected/2df640a0-8f4d-4743-84f9-32a9e187d282-kube-api-access-ss69s\") pod \"ceilometer-0\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.571021 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:47:04 crc kubenswrapper[4706]: I1206 05:47:04.997763 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-99698bc47-f5twk" event={"ID":"7ea78fc3-49cb-46cb-a450-c3c0990135fb","Type":"ContainerStarted","Data":"20113087651d16ed6ed792f999a51d8ec9eef8672af64496668747d77d882cc3"} Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.001750 4706 generic.go:334] "Generic (PLEG): container finished" podID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerID="5c008e2e346c97d8437ddcf7143a68076040812b5dc1bc770005c634192c2853" exitCode=1 Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.001816 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" event={"ID":"687c17f9-1a75-4b68-8d97-1285ad8f9e3d","Type":"ContainerDied","Data":"5c008e2e346c97d8437ddcf7143a68076040812b5dc1bc770005c634192c2853"} Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.002348 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.149:9311/healthcheck\": dial tcp 10.217.0.149:9311: connect: connection refused" Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.002451 4706 scope.go:117] "RemoveContainer" containerID="5c008e2e346c97d8437ddcf7143a68076040812b5dc1bc770005c634192c2853" Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.002759 4706 scope.go:117] "RemoveContainer" containerID="002b48091c527ced03a528aab54bc6329b702948d4609b828f02ee3aaa0d2e01" Dec 06 05:47:05 crc kubenswrapper[4706]: E1206 05:47:05.002956 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=barbican-api pod=barbican-api-5d9ccff6c8-kmw5z_openstack(687c17f9-1a75-4b68-8d97-1285ad8f9e3d)\"" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.005490 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" event={"ID":"c6cd9b30-8090-48ed-9c45-bca903c380ee","Type":"ContainerStarted","Data":"c966c6915766bbd5c74efbdfdc4abf449953f6c9c4efaa5df3e90b5b349cf4b8"} Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.005523 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.039835 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-99698bc47-f5twk" podStartSLOduration=3.699973274 podStartE2EDuration="9.039817922s" podCreationTimestamp="2025-12-06 05:46:56 +0000 UTC" firstStartedPulling="2025-12-06 05:46:57.82712144 +0000 UTC m=+1640.154945384" lastFinishedPulling="2025-12-06 05:47:03.166966088 +0000 UTC m=+1645.494790032" observedRunningTime="2025-12-06 05:47:05.034532809 +0000 UTC m=+1647.362356763" watchObservedRunningTime="2025-12-06 05:47:05.039817922 +0000 UTC m=+1647.367641866" Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.078676 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" podStartSLOduration=5.078662551 podStartE2EDuration="5.078662551s" podCreationTimestamp="2025-12-06 05:47:00 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:05.073765009 +0000 UTC m=+1647.401588953" watchObservedRunningTime="2025-12-06 05:47:05.078662551 +0000 UTC m=+1647.406486495" Dec 06 05:47:05 crc kubenswrapper[4706]: W1206 05:47:05.127611 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2df640a0_8f4d_4743_84f9_32a9e187d282.slice/crio-9d14beec0e18dccba74258df2468fd74a75cdb577714e91db87790024993c8b1 WatchSource:0}: Error finding container 9d14beec0e18dccba74258df2468fd74a75cdb577714e91db87790024993c8b1: Status 404 returned error can't find the container with id 9d14beec0e18dccba74258df2468fd74a75cdb577714e91db87790024993c8b1 Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.156293 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.403806 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jtjm9"] Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.406231 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jtjm9" Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.416109 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jtjm9"] Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.547666 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5z9x5\" (UniqueName: \"kubernetes.io/projected/05c17326-c953-41d3-97ea-d620f5535013-kube-api-access-5z9x5\") pod \"redhat-operators-jtjm9\" (UID: \"05c17326-c953-41d3-97ea-d620f5535013\") " pod="openshift-marketplace/redhat-operators-jtjm9" Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.547731 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05c17326-c953-41d3-97ea-d620f5535013-utilities\") pod \"redhat-operators-jtjm9\" (UID: \"05c17326-c953-41d3-97ea-d620f5535013\") " pod="openshift-marketplace/redhat-operators-jtjm9" Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.548063 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05c17326-c953-41d3-97ea-d620f5535013-catalog-content\") pod \"redhat-operators-jtjm9\" (UID: \"05c17326-c953-41d3-97ea-d620f5535013\") " pod="openshift-marketplace/redhat-operators-jtjm9" Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.649996 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05c17326-c953-41d3-97ea-d620f5535013-catalog-content\") pod \"redhat-operators-jtjm9\" (UID: \"05c17326-c953-41d3-97ea-d620f5535013\") " pod="openshift-marketplace/redhat-operators-jtjm9" Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.650091 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5z9x5\" (UniqueName: \"kubernetes.io/projected/05c17326-c953-41d3-97ea-d620f5535013-kube-api-access-5z9x5\") pod \"redhat-operators-jtjm9\" (UID: \"05c17326-c953-41d3-97ea-d620f5535013\") " pod="openshift-marketplace/redhat-operators-jtjm9" Dec 06 05:47:05 crc kubenswrapper[4706]: 
I1206 05:47:05.650139 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05c17326-c953-41d3-97ea-d620f5535013-utilities\") pod \"redhat-operators-jtjm9\" (UID: \"05c17326-c953-41d3-97ea-d620f5535013\") " pod="openshift-marketplace/redhat-operators-jtjm9" Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.650412 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05c17326-c953-41d3-97ea-d620f5535013-catalog-content\") pod \"redhat-operators-jtjm9\" (UID: \"05c17326-c953-41d3-97ea-d620f5535013\") " pod="openshift-marketplace/redhat-operators-jtjm9" Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.650453 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05c17326-c953-41d3-97ea-d620f5535013-utilities\") pod \"redhat-operators-jtjm9\" (UID: \"05c17326-c953-41d3-97ea-d620f5535013\") " pod="openshift-marketplace/redhat-operators-jtjm9" Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.682738 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5z9x5\" (UniqueName: \"kubernetes.io/projected/05c17326-c953-41d3-97ea-d620f5535013-kube-api-access-5z9x5\") pod \"redhat-operators-jtjm9\" (UID: \"05c17326-c953-41d3-97ea-d620f5535013\") " pod="openshift-marketplace/redhat-operators-jtjm9" Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.736679 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jtjm9" Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.961317 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:47:05 crc kubenswrapper[4706]: I1206 05:47:05.961379 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:47:06 crc kubenswrapper[4706]: I1206 05:47:06.035332 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.149:9311/healthcheck\": dial tcp 10.217.0.149:9311: connect: connection refused" Dec 06 05:47:06 crc kubenswrapper[4706]: I1206 05:47:06.035405 4706 scope.go:117] "RemoveContainer" containerID="5c008e2e346c97d8437ddcf7143a68076040812b5dc1bc770005c634192c2853" Dec 06 05:47:06 crc kubenswrapper[4706]: E1206 05:47:06.035641 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=barbican-api pod=barbican-api-5d9ccff6c8-kmw5z_openstack(687c17f9-1a75-4b68-8d97-1285ad8f9e3d)\"" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" Dec 06 05:47:06 crc kubenswrapper[4706]: I1206 05:47:06.049554 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="b78581a5-4314-4209-967f-715fa91ee6a7" path="/var/lib/kubelet/pods/b78581a5-4314-4209-967f-715fa91ee6a7/volumes" Dec 06 05:47:06 crc kubenswrapper[4706]: I1206 05:47:06.058035 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5ec6ece-bd35-490c-be82-2b5506a0ca73" path="/var/lib/kubelet/pods/e5ec6ece-bd35-490c-be82-2b5506a0ca73/volumes" Dec 06 05:47:06 crc kubenswrapper[4706]: I1206 05:47:06.058633 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2df640a0-8f4d-4743-84f9-32a9e187d282","Type":"ContainerStarted","Data":"9d14beec0e18dccba74258df2468fd74a75cdb577714e91db87790024993c8b1"} Dec 06 05:47:06 crc kubenswrapper[4706]: I1206 05:47:06.439443 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jtjm9"] Dec 06 05:47:06 crc kubenswrapper[4706]: I1206 05:47:06.455142 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:47:06 crc kubenswrapper[4706]: I1206 05:47:06.456246 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.149:9311/healthcheck\": dial tcp 10.217.0.149:9311: connect: connection refused" Dec 06 05:47:07 crc kubenswrapper[4706]: I1206 05:47:07.054932 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2df640a0-8f4d-4743-84f9-32a9e187d282","Type":"ContainerStarted","Data":"7481f637da8457d5258f019034cbd64b4fff7d392b0107982e7fb453ae34bc1a"} Dec 06 05:47:07 crc kubenswrapper[4706]: I1206 05:47:07.055340 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2df640a0-8f4d-4743-84f9-32a9e187d282","Type":"ContainerStarted","Data":"b6c6b612946de87b2e9f02d1187cf03be5000e9780f622d1f881e47c6a22035f"} Dec 06 05:47:07 crc kubenswrapper[4706]: I1206 05:47:07.057303 4706 generic.go:334] "Generic (PLEG): container finished" podID="05c17326-c953-41d3-97ea-d620f5535013" containerID="4712bc903ca987b517a63e4c7f2b58f2dc6d2496f0fab7da7dda1beb894316f5" exitCode=0 Dec 06 05:47:07 crc kubenswrapper[4706]: I1206 05:47:07.057346 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jtjm9" event={"ID":"05c17326-c953-41d3-97ea-d620f5535013","Type":"ContainerDied","Data":"4712bc903ca987b517a63e4c7f2b58f2dc6d2496f0fab7da7dda1beb894316f5"} Dec 06 05:47:07 crc kubenswrapper[4706]: I1206 05:47:07.057367 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jtjm9" event={"ID":"05c17326-c953-41d3-97ea-d620f5535013","Type":"ContainerStarted","Data":"f8b912a16b80fe34f5c18c2b4ac0170a427e9863e3e87cb08f570e8abecee370"} Dec 06 05:47:07 crc kubenswrapper[4706]: I1206 05:47:07.057847 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.149:9311/healthcheck\": dial tcp 10.217.0.149:9311: connect: connection refused" Dec 06 05:47:07 crc kubenswrapper[4706]: I1206 05:47:07.057942 4706 scope.go:117] "RemoveContainer" containerID="5c008e2e346c97d8437ddcf7143a68076040812b5dc1bc770005c634192c2853" Dec 06 05:47:07 crc kubenswrapper[4706]: E1206 05:47:07.058125 4706 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=barbican-api pod=barbican-api-5d9ccff6c8-kmw5z_openstack(687c17f9-1a75-4b68-8d97-1285ad8f9e3d)\"" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" Dec 06 05:47:07 crc kubenswrapper[4706]: I1206 05:47:07.454993 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.149:9311/healthcheck\": dial tcp 10.217.0.149:9311: connect: connection refused" Dec 06 05:47:08 crc kubenswrapper[4706]: I1206 05:47:08.091789 4706 generic.go:334] "Generic (PLEG): container finished" podID="ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" containerID="4690ec4d5b8ee63e514a007ab5967eb43caa526a3ad17a05261a250f9e8efa62" exitCode=0 Dec 06 05:47:08 crc kubenswrapper[4706]: I1206 05:47:08.096775 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f979b84f6-hzq85" event={"ID":"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf","Type":"ContainerDied","Data":"4690ec4d5b8ee63e514a007ab5967eb43caa526a3ad17a05261a250f9e8efa62"} Dec 06 05:47:08 crc kubenswrapper[4706]: I1206 05:47:08.985437 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-7f979b84f6-hzq85" podUID="ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.142:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.142:8443: connect: connection refused" Dec 06 05:47:09 crc kubenswrapper[4706]: I1206 05:47:09.104477 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2df640a0-8f4d-4743-84f9-32a9e187d282","Type":"ContainerStarted","Data":"27a55dcd05469839e11680828d8c1a8f324b117e82c0fcd8e2ed43a4ad7bb8c7"} Dec 06 05:47:09 crc kubenswrapper[4706]: I1206 05:47:09.107305 4706 generic.go:334] "Generic (PLEG): container finished" podID="05c17326-c953-41d3-97ea-d620f5535013" containerID="83a35db4459ebc38dc1591077e9cfd7dbc13ae5d74efb7d6802ddeb56fddc0f8" exitCode=0 Dec 06 05:47:09 crc kubenswrapper[4706]: I1206 05:47:09.107393 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jtjm9" event={"ID":"05c17326-c953-41d3-97ea-d620f5535013","Type":"ContainerDied","Data":"83a35db4459ebc38dc1591077e9cfd7dbc13ae5d74efb7d6802ddeb56fddc0f8"} Dec 06 05:47:09 crc kubenswrapper[4706]: I1206 05:47:09.109031 4706 generic.go:334] "Generic (PLEG): container finished" podID="4f0ced3b-4b02-4ce1-935a-af7cc2e01346" containerID="0956ef6adc5ea811ae355357ff7be56603cafeab53c5ac9b353d08f2502893a2" exitCode=0 Dec 06 05:47:09 crc kubenswrapper[4706]: I1206 05:47:09.109108 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-mzfvf" event={"ID":"4f0ced3b-4b02-4ce1-935a-af7cc2e01346","Type":"ContainerDied","Data":"0956ef6adc5ea811ae355357ff7be56603cafeab53c5ac9b353d08f2502893a2"} Dec 06 05:47:09 crc kubenswrapper[4706]: I1206 05:47:09.112032 4706 generic.go:334] "Generic (PLEG): container finished" podID="03d1d05b-3978-41bd-a7b6-5c0465432409" containerID="8ab97a3e9911e22c9eec4678e401db9aeeef45fe97e735d2361d29ec47239633" exitCode=0 Dec 06 05:47:09 crc kubenswrapper[4706]: I1206 05:47:09.112075 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-9vnhs" 
event={"ID":"03d1d05b-3978-41bd-a7b6-5c0465432409","Type":"ContainerDied","Data":"8ab97a3e9911e22c9eec4678e401db9aeeef45fe97e735d2361d29ec47239633"} Dec 06 05:47:09 crc kubenswrapper[4706]: I1206 05:47:09.455531 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.149:9311/healthcheck\": dial tcp 10.217.0.149:9311: connect: connection refused" Dec 06 05:47:09 crc kubenswrapper[4706]: I1206 05:47:09.455609 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:47:09 crc kubenswrapper[4706]: I1206 05:47:09.456225 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.149:9311/healthcheck\": dial tcp 10.217.0.149:9311: connect: connection refused" Dec 06 05:47:09 crc kubenswrapper[4706]: I1206 05:47:09.456293 4706 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="barbican-api-log" containerStatusID={"Type":"cri-o","ID":"26ad472cbd33633f7af1b4c342cc43095dec2824c00ca1a73fbc5510bcad3d5c"} pod="openstack/barbican-api-5d9ccff6c8-kmw5z" containerMessage="Container barbican-api-log failed liveness probe, will be restarted" Dec 06 05:47:09 crc kubenswrapper[4706]: I1206 05:47:09.456316 4706 scope.go:117] "RemoveContainer" containerID="5c008e2e346c97d8437ddcf7143a68076040812b5dc1bc770005c634192c2853" Dec 06 05:47:09 crc kubenswrapper[4706]: I1206 05:47:09.456336 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" containerID="cri-o://26ad472cbd33633f7af1b4c342cc43095dec2824c00ca1a73fbc5510bcad3d5c" gracePeriod=30 Dec 06 05:47:09 crc kubenswrapper[4706]: E1206 05:47:09.965628 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=barbican-api pod=barbican-api-5d9ccff6c8-kmw5z_openstack(687c17f9-1a75-4b68-8d97-1285ad8f9e3d)\"" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.121922 4706 generic.go:334] "Generic (PLEG): container finished" podID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerID="26ad472cbd33633f7af1b4c342cc43095dec2824c00ca1a73fbc5510bcad3d5c" exitCode=143 Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.121999 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" event={"ID":"687c17f9-1a75-4b68-8d97-1285ad8f9e3d","Type":"ContainerDied","Data":"26ad472cbd33633f7af1b4c342cc43095dec2824c00ca1a73fbc5510bcad3d5c"} Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.122031 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" event={"ID":"687c17f9-1a75-4b68-8d97-1285ad8f9e3d","Type":"ContainerStarted","Data":"c65633ba26c87a617f75a17a16ddec504b12ea1e7b16c260be02e0e4178d401e"} Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.122246 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" Dec 06 05:47:10 crc 
kubenswrapper[4706]: I1206 05:47:10.122681 4706 scope.go:117] "RemoveContainer" containerID="5c008e2e346c97d8437ddcf7143a68076040812b5dc1bc770005c634192c2853" Dec 06 05:47:10 crc kubenswrapper[4706]: E1206 05:47:10.123062 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=barbican-api pod=barbican-api-5d9ccff6c8-kmw5z_openstack(687c17f9-1a75-4b68-8d97-1285ad8f9e3d)\"" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.125501 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jtjm9" event={"ID":"05c17326-c953-41d3-97ea-d620f5535013","Type":"ContainerStarted","Data":"2beae595b6bfed7e68bbadce1618e970d6f47704636c0fa9ba8bffa9dc2a17cb"} Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.130089 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2df640a0-8f4d-4743-84f9-32a9e187d282","Type":"ContainerStarted","Data":"9133a43b784380a19e539aa68558dcfcb732f30a1f8ef969e7aba3929539c185"} Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.182497 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.705865573 podStartE2EDuration="6.182465273s" podCreationTimestamp="2025-12-06 05:47:04 +0000 UTC" firstStartedPulling="2025-12-06 05:47:05.132114295 +0000 UTC m=+1647.459938239" lastFinishedPulling="2025-12-06 05:47:09.608713995 +0000 UTC m=+1651.936537939" observedRunningTime="2025-12-06 05:47:10.178468076 +0000 UTC m=+1652.506292020" watchObservedRunningTime="2025-12-06 05:47:10.182465273 +0000 UTC m=+1652.510289217" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.207632 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jtjm9" podStartSLOduration=2.52333339 podStartE2EDuration="5.207614633s" podCreationTimestamp="2025-12-06 05:47:05 +0000 UTC" firstStartedPulling="2025-12-06 05:47:07.05985885 +0000 UTC m=+1649.387682794" lastFinishedPulling="2025-12-06 05:47:09.744140073 +0000 UTC m=+1652.071964037" observedRunningTime="2025-12-06 05:47:10.205663481 +0000 UTC m=+1652.533487445" watchObservedRunningTime="2025-12-06 05:47:10.207614633 +0000 UTC m=+1652.535438577" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.543334 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.576570 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-mzfvf" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.639663 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8jr6c\" (UniqueName: \"kubernetes.io/projected/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-kube-api-access-8jr6c\") pod \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.639710 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-combined-ca-bundle\") pod \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.639756 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-logs\") pod \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.639771 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-config-data\") pod \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.639841 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-scripts\") pod \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\" (UID: \"4f0ced3b-4b02-4ce1-935a-af7cc2e01346\") " Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.646327 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-logs" (OuterVolumeSpecName: "logs") pod "4f0ced3b-4b02-4ce1-935a-af7cc2e01346" (UID: "4f0ced3b-4b02-4ce1-935a-af7cc2e01346"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.646384 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68dcc9cf6f-fnr2t"] Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.646599 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" podUID="f77dd76e-3bec-475c-aefd-7d8a7d9fef84" containerName="dnsmasq-dns" containerID="cri-o://eec80dea3b09176ad51c414c7e11ae845f2ef6d04b169daf5d219ddb9fd1d63b" gracePeriod=10 Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.653191 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-scripts" (OuterVolumeSpecName: "scripts") pod "4f0ced3b-4b02-4ce1-935a-af7cc2e01346" (UID: "4f0ced3b-4b02-4ce1-935a-af7cc2e01346"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.662259 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-kube-api-access-8jr6c" (OuterVolumeSpecName: "kube-api-access-8jr6c") pod "4f0ced3b-4b02-4ce1-935a-af7cc2e01346" (UID: "4f0ced3b-4b02-4ce1-935a-af7cc2e01346"). 
InnerVolumeSpecName "kube-api-access-8jr6c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.701562 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-config-data" (OuterVolumeSpecName: "config-data") pod "4f0ced3b-4b02-4ce1-935a-af7cc2e01346" (UID: "4f0ced3b-4b02-4ce1-935a-af7cc2e01346"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.723206 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4f0ced3b-4b02-4ce1-935a-af7cc2e01346" (UID: "4f0ced3b-4b02-4ce1-935a-af7cc2e01346"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.743101 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8jr6c\" (UniqueName: \"kubernetes.io/projected/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-kube-api-access-8jr6c\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.743132 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.743144 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.743155 4706 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-logs\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.743164 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4f0ced3b-4b02-4ce1-935a-af7cc2e01346-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.823557 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" podUID="f77dd76e-3bec-475c-aefd-7d8a7d9fef84" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.140:5353: connect: connection refused" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.827382 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-9vnhs" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.844260 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4p8bf\" (UniqueName: \"kubernetes.io/projected/03d1d05b-3978-41bd-a7b6-5c0465432409-kube-api-access-4p8bf\") pod \"03d1d05b-3978-41bd-a7b6-5c0465432409\" (UID: \"03d1d05b-3978-41bd-a7b6-5c0465432409\") " Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.844417 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/03d1d05b-3978-41bd-a7b6-5c0465432409-config\") pod \"03d1d05b-3978-41bd-a7b6-5c0465432409\" (UID: \"03d1d05b-3978-41bd-a7b6-5c0465432409\") " Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.844557 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03d1d05b-3978-41bd-a7b6-5c0465432409-combined-ca-bundle\") pod \"03d1d05b-3978-41bd-a7b6-5c0465432409\" (UID: \"03d1d05b-3978-41bd-a7b6-5c0465432409\") " Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.865239 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03d1d05b-3978-41bd-a7b6-5c0465432409-kube-api-access-4p8bf" (OuterVolumeSpecName: "kube-api-access-4p8bf") pod "03d1d05b-3978-41bd-a7b6-5c0465432409" (UID: "03d1d05b-3978-41bd-a7b6-5c0465432409"). InnerVolumeSpecName "kube-api-access-4p8bf". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.870684 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03d1d05b-3978-41bd-a7b6-5c0465432409-config" (OuterVolumeSpecName: "config") pod "03d1d05b-3978-41bd-a7b6-5c0465432409" (UID: "03d1d05b-3978-41bd-a7b6-5c0465432409"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.907245 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/03d1d05b-3978-41bd-a7b6-5c0465432409-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "03d1d05b-3978-41bd-a7b6-5c0465432409" (UID: "03d1d05b-3978-41bd-a7b6-5c0465432409"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.948575 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/03d1d05b-3978-41bd-a7b6-5c0465432409-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.948623 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4p8bf\" (UniqueName: \"kubernetes.io/projected/03d1d05b-3978-41bd-a7b6-5c0465432409-kube-api-access-4p8bf\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:10 crc kubenswrapper[4706]: I1206 05:47:10.948638 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/03d1d05b-3978-41bd-a7b6-5c0465432409-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.139901 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-9vnhs" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.139923 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-9vnhs" event={"ID":"03d1d05b-3978-41bd-a7b6-5c0465432409","Type":"ContainerDied","Data":"c8db130af8dc5b46ef0cb12d6b97c685b0e4d7b8f2a8d3520e843fda5e2b6733"} Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.139971 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c8db130af8dc5b46ef0cb12d6b97c685b0e4d7b8f2a8d3520e843fda5e2b6733" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.150407 4706 scope.go:117] "RemoveContainer" containerID="5c008e2e346c97d8437ddcf7143a68076040812b5dc1bc770005c634192c2853" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.150712 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-mzfvf" event={"ID":"4f0ced3b-4b02-4ce1-935a-af7cc2e01346","Type":"ContainerDied","Data":"728777caa0e2511d8486dd4026f8941930fcf4e4dab320a52969434e39da32ab"} Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.150771 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="728777caa0e2511d8486dd4026f8941930fcf4e4dab320a52969434e39da32ab" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.150741 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-mzfvf" Dec 06 05:47:11 crc kubenswrapper[4706]: E1206 05:47:11.150912 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=barbican-api pod=barbican-api-5d9ccff6c8-kmw5z_openstack(687c17f9-1a75-4b68-8d97-1285ad8f9e3d)\"" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.151451 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.266638 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-789868f976-vz5nh"] Dec 06 05:47:11 crc kubenswrapper[4706]: E1206 05:47:11.267000 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03d1d05b-3978-41bd-a7b6-5c0465432409" containerName="neutron-db-sync" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.267016 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="03d1d05b-3978-41bd-a7b6-5c0465432409" containerName="neutron-db-sync" Dec 06 05:47:11 crc kubenswrapper[4706]: E1206 05:47:11.267032 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f0ced3b-4b02-4ce1-935a-af7cc2e01346" containerName="placement-db-sync" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.267039 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f0ced3b-4b02-4ce1-935a-af7cc2e01346" containerName="placement-db-sync" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.267258 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="03d1d05b-3978-41bd-a7b6-5c0465432409" containerName="neutron-db-sync" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.267281 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f0ced3b-4b02-4ce1-935a-af7cc2e01346" containerName="placement-db-sync" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.268154 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.270197 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.271487 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.271615 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.271778 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.275696 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-wb9tg" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.292083 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-789868f976-vz5nh"] Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.358488 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8507b27e-a504-499e-bfea-e8c0397ff528-config-data\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.358571 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fkb4l\" (UniqueName: \"kubernetes.io/projected/8507b27e-a504-499e-bfea-e8c0397ff528-kube-api-access-fkb4l\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.358609 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8507b27e-a504-499e-bfea-e8c0397ff528-scripts\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.358636 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8507b27e-a504-499e-bfea-e8c0397ff528-public-tls-certs\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.358656 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8507b27e-a504-499e-bfea-e8c0397ff528-internal-tls-certs\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.358673 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8507b27e-a504-499e-bfea-e8c0397ff528-combined-ca-bundle\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 
05:47:11.358702 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8507b27e-a504-499e-bfea-e8c0397ff528-logs\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.362537 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-789c5c5cb7-pt598"] Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.364254 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.412350 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-789c5c5cb7-pt598"] Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.466959 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fkb4l\" (UniqueName: \"kubernetes.io/projected/8507b27e-a504-499e-bfea-e8c0397ff528-kube-api-access-fkb4l\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.467027 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8507b27e-a504-499e-bfea-e8c0397ff528-scripts\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.467079 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8507b27e-a504-499e-bfea-e8c0397ff528-public-tls-certs\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.467271 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8507b27e-a504-499e-bfea-e8c0397ff528-internal-tls-certs\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.467292 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8507b27e-a504-499e-bfea-e8c0397ff528-combined-ca-bundle\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.467323 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8507b27e-a504-499e-bfea-e8c0397ff528-logs\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.467396 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8507b27e-a504-499e-bfea-e8c0397ff528-config-data\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc 
kubenswrapper[4706]: I1206 05:47:11.474905 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8507b27e-a504-499e-bfea-e8c0397ff528-combined-ca-bundle\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.478473 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8507b27e-a504-499e-bfea-e8c0397ff528-public-tls-certs\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.479843 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8507b27e-a504-499e-bfea-e8c0397ff528-scripts\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.480153 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8507b27e-a504-499e-bfea-e8c0397ff528-logs\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.481557 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8507b27e-a504-499e-bfea-e8c0397ff528-internal-tls-certs\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.488749 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8507b27e-a504-499e-bfea-e8c0397ff528-config-data\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.506766 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fkb4l\" (UniqueName: \"kubernetes.io/projected/8507b27e-a504-499e-bfea-e8c0397ff528-kube-api-access-fkb4l\") pod \"placement-789868f976-vz5nh\" (UID: \"8507b27e-a504-499e-bfea-e8c0397ff528\") " pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.569266 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-ovsdbserver-nb\") pod \"dnsmasq-dns-789c5c5cb7-pt598\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.569358 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-dns-swift-storage-0\") pod \"dnsmasq-dns-789c5c5cb7-pt598\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.569382 4706 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-config\") pod \"dnsmasq-dns-789c5c5cb7-pt598\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.569406 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-dns-svc\") pod \"dnsmasq-dns-789c5c5cb7-pt598\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.569429 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-ovsdbserver-sb\") pod \"dnsmasq-dns-789c5c5cb7-pt598\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.569498 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4nh9\" (UniqueName: \"kubernetes.io/projected/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-kube-api-access-v4nh9\") pod \"dnsmasq-dns-789c5c5cb7-pt598\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.588842 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.599454 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-565566dfbd-5h6dj"] Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.601015 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-565566dfbd-5h6dj" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.605792 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-6kh5g" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.605948 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.606157 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.606693 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.628148 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-565566dfbd-5h6dj"] Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.675046 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-dns-swift-storage-0\") pod \"dnsmasq-dns-789c5c5cb7-pt598\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.675106 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-config\") pod \"dnsmasq-dns-789c5c5cb7-pt598\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.675136 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-dns-svc\") pod \"dnsmasq-dns-789c5c5cb7-pt598\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.675155 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-ovndb-tls-certs\") pod \"neutron-565566dfbd-5h6dj\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " pod="openstack/neutron-565566dfbd-5h6dj" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.675179 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-httpd-config\") pod \"neutron-565566dfbd-5h6dj\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " pod="openstack/neutron-565566dfbd-5h6dj" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.675199 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-ovsdbserver-sb\") pod \"dnsmasq-dns-789c5c5cb7-pt598\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.675269 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4nh9\" (UniqueName: \"kubernetes.io/projected/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-kube-api-access-v4nh9\") pod \"dnsmasq-dns-789c5c5cb7-pt598\" (UID: 
\"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " pod="openstack/dnsmasq-dns-789c5c5cb7-pt598"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.675301 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kcqx\" (UniqueName: \"kubernetes.io/projected/4e512237-f0a0-4312-900f-5f8cd066f34c-kube-api-access-4kcqx\") pod \"neutron-565566dfbd-5h6dj\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " pod="openstack/neutron-565566dfbd-5h6dj"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.675323 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-ovsdbserver-nb\") pod \"dnsmasq-dns-789c5c5cb7-pt598\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " pod="openstack/dnsmasq-dns-789c5c5cb7-pt598"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.675350 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-combined-ca-bundle\") pod \"neutron-565566dfbd-5h6dj\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " pod="openstack/neutron-565566dfbd-5h6dj"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.675377 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-config\") pod \"neutron-565566dfbd-5h6dj\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " pod="openstack/neutron-565566dfbd-5h6dj"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.676262 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-dns-swift-storage-0\") pod \"dnsmasq-dns-789c5c5cb7-pt598\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " pod="openstack/dnsmasq-dns-789c5c5cb7-pt598"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.676788 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-config\") pod \"dnsmasq-dns-789c5c5cb7-pt598\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " pod="openstack/dnsmasq-dns-789c5c5cb7-pt598"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.677239 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-ovsdbserver-sb\") pod \"dnsmasq-dns-789c5c5cb7-pt598\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " pod="openstack/dnsmasq-dns-789c5c5cb7-pt598"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.677655 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-ovsdbserver-nb\") pod \"dnsmasq-dns-789c5c5cb7-pt598\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " pod="openstack/dnsmasq-dns-789c5c5cb7-pt598"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.679255 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-dns-svc\") pod \"dnsmasq-dns-789c5c5cb7-pt598\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " pod="openstack/dnsmasq-dns-789c5c5cb7-pt598"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.706817 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4nh9\" (UniqueName: \"kubernetes.io/projected/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-kube-api-access-v4nh9\") pod \"dnsmasq-dns-789c5c5cb7-pt598\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " pod="openstack/dnsmasq-dns-789c5c5cb7-pt598"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.712433 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-789c5c5cb7-pt598"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.775938 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kcqx\" (UniqueName: \"kubernetes.io/projected/4e512237-f0a0-4312-900f-5f8cd066f34c-kube-api-access-4kcqx\") pod \"neutron-565566dfbd-5h6dj\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " pod="openstack/neutron-565566dfbd-5h6dj"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.776365 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-combined-ca-bundle\") pod \"neutron-565566dfbd-5h6dj\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " pod="openstack/neutron-565566dfbd-5h6dj"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.776396 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-config\") pod \"neutron-565566dfbd-5h6dj\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " pod="openstack/neutron-565566dfbd-5h6dj"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.776441 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-ovndb-tls-certs\") pod \"neutron-565566dfbd-5h6dj\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " pod="openstack/neutron-565566dfbd-5h6dj"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.776459 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-httpd-config\") pod \"neutron-565566dfbd-5h6dj\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " pod="openstack/neutron-565566dfbd-5h6dj"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.796985 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-ovndb-tls-certs\") pod \"neutron-565566dfbd-5h6dj\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " pod="openstack/neutron-565566dfbd-5h6dj"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.797358 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-combined-ca-bundle\") pod \"neutron-565566dfbd-5h6dj\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " pod="openstack/neutron-565566dfbd-5h6dj"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.798276 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-httpd-config\") pod \"neutron-565566dfbd-5h6dj\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " pod="openstack/neutron-565566dfbd-5h6dj"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.799777 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-config\") pod \"neutron-565566dfbd-5h6dj\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " pod="openstack/neutron-565566dfbd-5h6dj"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.813796 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kcqx\" (UniqueName: \"kubernetes.io/projected/4e512237-f0a0-4312-900f-5f8cd066f34c-kube-api-access-4kcqx\") pod \"neutron-565566dfbd-5h6dj\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " pod="openstack/neutron-565566dfbd-5h6dj"
Dec 06 05:47:11 crc kubenswrapper[4706]: I1206 05:47:11.876817 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-565566dfbd-5h6dj"
Dec 06 05:47:12 crc kubenswrapper[4706]: I1206 05:47:12.141473 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-789868f976-vz5nh"]
Dec 06 05:47:12 crc kubenswrapper[4706]: I1206 05:47:12.170192 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-789868f976-vz5nh" event={"ID":"8507b27e-a504-499e-bfea-e8c0397ff528","Type":"ContainerStarted","Data":"29a35920edb992f01500f0bae49f2161d9fb7b239e6fbdfb756e292334b1bf6a"}
Dec 06 05:47:12 crc kubenswrapper[4706]: I1206 05:47:12.175308 4706 generic.go:334] "Generic (PLEG): container finished" podID="f77dd76e-3bec-475c-aefd-7d8a7d9fef84" containerID="eec80dea3b09176ad51c414c7e11ae845f2ef6d04b169daf5d219ddb9fd1d63b" exitCode=0
Dec 06 05:47:12 crc kubenswrapper[4706]: I1206 05:47:12.175467 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" event={"ID":"f77dd76e-3bec-475c-aefd-7d8a7d9fef84","Type":"ContainerDied","Data":"eec80dea3b09176ad51c414c7e11ae845f2ef6d04b169daf5d219ddb9fd1d63b"}
Dec 06 05:47:12 crc kubenswrapper[4706]: W1206 05:47:12.287510 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9a30a8df_fdae_4d26_8ff6_b2a6ca5895cb.slice/crio-a41349888dd4856bee8d646221b5e8351270fb478eb1df8c953aa80a61524761 WatchSource:0}: Error finding container a41349888dd4856bee8d646221b5e8351270fb478eb1df8c953aa80a61524761: Status 404 returned error can't find the container with id a41349888dd4856bee8d646221b5e8351270fb478eb1df8c953aa80a61524761
Dec 06 05:47:12 crc kubenswrapper[4706]: I1206 05:47:12.294351 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-789c5c5cb7-pt598"]
Dec 06 05:47:13 crc kubenswrapper[4706]: I1206 05:47:13.189267 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" event={"ID":"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb","Type":"ContainerStarted","Data":"a41349888dd4856bee8d646221b5e8351270fb478eb1df8c953aa80a61524761"}
Dec 06 05:47:13 crc kubenswrapper[4706]: I1206 05:47:13.706085 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t"
Dec 06 05:47:13 crc kubenswrapper[4706]: I1206 05:47:13.817868 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-dns-svc\") pod \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") "
Dec 06 05:47:13 crc kubenswrapper[4706]: I1206 05:47:13.817960 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-config\") pod \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") "
Dec 06 05:47:13 crc kubenswrapper[4706]: I1206 05:47:13.818001 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-ovsdbserver-nb\") pod \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") "
Dec 06 05:47:13 crc kubenswrapper[4706]: I1206 05:47:13.818145 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-ovsdbserver-sb\") pod \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") "
Dec 06 05:47:13 crc kubenswrapper[4706]: I1206 05:47:13.818189 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nfnjd\" (UniqueName: \"kubernetes.io/projected/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-kube-api-access-nfnjd\") pod \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\" (UID: \"f77dd76e-3bec-475c-aefd-7d8a7d9fef84\") "
Dec 06 05:47:13 crc kubenswrapper[4706]: I1206 05:47:13.836257 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-kube-api-access-nfnjd" (OuterVolumeSpecName: "kube-api-access-nfnjd") pod "f77dd76e-3bec-475c-aefd-7d8a7d9fef84" (UID: "f77dd76e-3bec-475c-aefd-7d8a7d9fef84"). InnerVolumeSpecName "kube-api-access-nfnjd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 06 05:47:13 crc kubenswrapper[4706]: I1206 05:47:13.880965 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-config" (OuterVolumeSpecName: "config") pod "f77dd76e-3bec-475c-aefd-7d8a7d9fef84" (UID: "f77dd76e-3bec-475c-aefd-7d8a7d9fef84"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 06 05:47:13 crc kubenswrapper[4706]: I1206 05:47:13.881010 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f77dd76e-3bec-475c-aefd-7d8a7d9fef84" (UID: "f77dd76e-3bec-475c-aefd-7d8a7d9fef84"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 06 05:47:13 crc kubenswrapper[4706]: I1206 05:47:13.892222 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f77dd76e-3bec-475c-aefd-7d8a7d9fef84" (UID: "f77dd76e-3bec-475c-aefd-7d8a7d9fef84"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 06 05:47:13 crc kubenswrapper[4706]: I1206 05:47:13.899630 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f77dd76e-3bec-475c-aefd-7d8a7d9fef84" (UID: "f77dd76e-3bec-475c-aefd-7d8a7d9fef84"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 06 05:47:13 crc kubenswrapper[4706]: I1206 05:47:13.920427 4706 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-dns-svc\") on node \"crc\" DevicePath \"\""
Dec 06 05:47:13 crc kubenswrapper[4706]: I1206 05:47:13.920456 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-config\") on node \"crc\" DevicePath \"\""
Dec 06 05:47:13 crc kubenswrapper[4706]: I1206 05:47:13.920468 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Dec 06 05:47:13 crc kubenswrapper[4706]: I1206 05:47:13.920478 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Dec 06 05:47:13 crc kubenswrapper[4706]: I1206 05:47:13.920490 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nfnjd\" (UniqueName: \"kubernetes.io/projected/f77dd76e-3bec-475c-aefd-7d8a7d9fef84-kube-api-access-nfnjd\") on node \"crc\" DevicePath \"\""
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.194201 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-5d57ffb9bb-t86s7" podUID="fb031ade-7dae-40f8-a748-8842d00f6a37" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.150:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.194540 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-5d57ffb9bb-t86s7" podUID="fb031ade-7dae-40f8-a748-8842d00f6a37" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.150:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.203936 4706 generic.go:334] "Generic (PLEG): container finished" podID="9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb" containerID="bfb790d3faf672e0d647e9b2a8debec8a9273af06752ba604007d2ebb993eede" exitCode=0
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.204065 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" event={"ID":"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb","Type":"ContainerDied","Data":"bfb790d3faf672e0d647e9b2a8debec8a9273af06752ba604007d2ebb993eede"}
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.206550 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-789868f976-vz5nh" event={"ID":"8507b27e-a504-499e-bfea-e8c0397ff528","Type":"ContainerStarted","Data":"dd7bd910e226519a1c8cee20bc0ea0347c27c68b021c0159bb2e3b15c77136c5"}
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.221111 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t" event={"ID":"f77dd76e-3bec-475c-aefd-7d8a7d9fef84","Type":"ContainerDied","Data":"0c43d645cdc4579b3decf790ccfd189a4d9ba8f9e05b595855904b16d80e2224"}
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.221162 4706 scope.go:117] "RemoveContainer" containerID="eec80dea3b09176ad51c414c7e11ae845f2ef6d04b169daf5d219ddb9fd1d63b"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.221241 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-68dcc9cf6f-fnr2t"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.290583 4706 scope.go:117] "RemoveContainer" containerID="b4a0103806729f574d80bb1ea545de98c1342568829f38b075b4f9e596c5a57a"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.291200 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-68dcc9cf6f-fnr2t"]
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.301237 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-68dcc9cf6f-fnr2t"]
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.347402 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5c45f4d87f-7sd44"]
Dec 06 05:47:14 crc kubenswrapper[4706]: E1206 05:47:14.347824 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f77dd76e-3bec-475c-aefd-7d8a7d9fef84" containerName="dnsmasq-dns"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.347841 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="f77dd76e-3bec-475c-aefd-7d8a7d9fef84" containerName="dnsmasq-dns"
Dec 06 05:47:14 crc kubenswrapper[4706]: E1206 05:47:14.347856 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f77dd76e-3bec-475c-aefd-7d8a7d9fef84" containerName="init"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.347863 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="f77dd76e-3bec-475c-aefd-7d8a7d9fef84" containerName="init"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.348039 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="f77dd76e-3bec-475c-aefd-7d8a7d9fef84" containerName="dnsmasq-dns"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.349034 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.352947 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.365835 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5c45f4d87f-7sd44"]
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.366322 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.534491 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8a2aaf5-7417-43c4-9562-2df330329adf-combined-ca-bundle\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.534570 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d8a2aaf5-7417-43c4-9562-2df330329adf-httpd-config\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.534626 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8a2aaf5-7417-43c4-9562-2df330329adf-ovndb-tls-certs\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.534669 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8a2aaf5-7417-43c4-9562-2df330329adf-internal-tls-certs\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.534760 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8a2aaf5-7417-43c4-9562-2df330329adf-public-tls-certs\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.534806 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d8a2aaf5-7417-43c4-9562-2df330329adf-config\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.534855 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tcthb\" (UniqueName: \"kubernetes.io/projected/d8a2aaf5-7417-43c4-9562-2df330329adf-kube-api-access-tcthb\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.636304 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/d8a2aaf5-7417-43c4-9562-2df330329adf-config\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.636415 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tcthb\" (UniqueName: \"kubernetes.io/projected/d8a2aaf5-7417-43c4-9562-2df330329adf-kube-api-access-tcthb\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.636494 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8a2aaf5-7417-43c4-9562-2df330329adf-combined-ca-bundle\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.636536 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d8a2aaf5-7417-43c4-9562-2df330329adf-httpd-config\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.636580 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8a2aaf5-7417-43c4-9562-2df330329adf-ovndb-tls-certs\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.636615 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8a2aaf5-7417-43c4-9562-2df330329adf-internal-tls-certs\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.636659 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8a2aaf5-7417-43c4-9562-2df330329adf-public-tls-certs\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.650981 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8a2aaf5-7417-43c4-9562-2df330329adf-internal-tls-certs\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.650989 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/d8a2aaf5-7417-43c4-9562-2df330329adf-httpd-config\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.650988 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8a2aaf5-7417-43c4-9562-2df330329adf-public-tls-certs\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.651590 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d8a2aaf5-7417-43c4-9562-2df330329adf-ovndb-tls-certs\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.652918 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/d8a2aaf5-7417-43c4-9562-2df330329adf-config\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.657749 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8a2aaf5-7417-43c4-9562-2df330329adf-combined-ca-bundle\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.660809 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tcthb\" (UniqueName: \"kubernetes.io/projected/d8a2aaf5-7417-43c4-9562-2df330329adf-kube-api-access-tcthb\") pod \"neutron-5c45f4d87f-7sd44\" (UID: \"d8a2aaf5-7417-43c4-9562-2df330329adf\") " pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:14 crc kubenswrapper[4706]: I1206 05:47:14.678507 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:15 crc kubenswrapper[4706]: I1206 05:47:15.012327 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-565566dfbd-5h6dj"]
Dec 06 05:47:15 crc kubenswrapper[4706]: I1206 05:47:15.191231 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5d57ffb9bb-t86s7" podUID="fb031ade-7dae-40f8-a748-8842d00f6a37" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.150:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Dec 06 05:47:15 crc kubenswrapper[4706]: I1206 05:47:15.191285 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5d57ffb9bb-t86s7" podUID="fb031ade-7dae-40f8-a748-8842d00f6a37" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.150:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Dec 06 05:47:15 crc kubenswrapper[4706]: I1206 05:47:15.229776 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-565566dfbd-5h6dj" event={"ID":"4e512237-f0a0-4312-900f-5f8cd066f34c","Type":"ContainerStarted","Data":"8ad46cf6cd5b3fc6b527b3397986530332a54cc9b1096d3c9fee077473030702"}
Dec 06 05:47:15 crc kubenswrapper[4706]: I1206 05:47:15.232538 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-789868f976-vz5nh" event={"ID":"8507b27e-a504-499e-bfea-e8c0397ff528","Type":"ContainerStarted","Data":"1327161196cb38306a9d49f74bf195b8a1818b049d37a27a99fcaa5144102d0e"}
Dec 06 05:47:15 crc kubenswrapper[4706]: W1206 05:47:15.262214 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8a2aaf5_7417_43c4_9562_2df330329adf.slice/crio-28a892cc98e24413dfb14e56a5bb5a5ac4099b417cae207abce7bd8fbcd28fbc WatchSource:0}: Error finding container 28a892cc98e24413dfb14e56a5bb5a5ac4099b417cae207abce7bd8fbcd28fbc: Status 404 returned error can't find the container with id 28a892cc98e24413dfb14e56a5bb5a5ac4099b417cae207abce7bd8fbcd28fbc
Dec 06 05:47:15 crc kubenswrapper[4706]: I1206 05:47:15.262782 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5c45f4d87f-7sd44"]
Dec 06 05:47:15 crc kubenswrapper[4706]: I1206 05:47:15.455079 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.149:9311/healthcheck\": dial tcp 10.217.0.149:9311: connect: connection refused"
Dec 06 05:47:15 crc kubenswrapper[4706]: I1206 05:47:15.737339 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jtjm9"
Dec 06 05:47:15 crc kubenswrapper[4706]: I1206 05:47:15.738993 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jtjm9"
Dec 06 05:47:16 crc kubenswrapper[4706]: I1206 05:47:16.060035 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f77dd76e-3bec-475c-aefd-7d8a7d9fef84" path="/var/lib/kubelet/pods/f77dd76e-3bec-475c-aefd-7d8a7d9fef84/volumes"
Dec 06 05:47:16 crc kubenswrapper[4706]: I1206 05:47:16.104114 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-6c6f7f7c88-ptmf7"
Dec 06 05:47:16 crc kubenswrapper[4706]: I1206 05:47:16.245709 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c45f4d87f-7sd44" event={"ID":"d8a2aaf5-7417-43c4-9562-2df330329adf","Type":"ContainerStarted","Data":"28a892cc98e24413dfb14e56a5bb5a5ac4099b417cae207abce7bd8fbcd28fbc"}
Dec 06 05:47:16 crc kubenswrapper[4706]: I1206 05:47:16.269565 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-789868f976-vz5nh" podStartSLOduration=5.269547637 podStartE2EDuration="5.269547637s" podCreationTimestamp="2025-12-06 05:47:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:16.267717159 +0000 UTC m=+1658.595541113" watchObservedRunningTime="2025-12-06 05:47:16.269547637 +0000 UTC m=+1658.597371581"
Dec 06 05:47:16 crc kubenswrapper[4706]: I1206 05:47:16.793524 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-jtjm9" podUID="05c17326-c953-41d3-97ea-d620f5535013" containerName="registry-server" probeResult="failure" output=<
Dec 06 05:47:16 crc kubenswrapper[4706]: timeout: failed to connect service ":50051" within 1s
Dec 06 05:47:16 crc kubenswrapper[4706]: >
Dec 06 05:47:17 crc kubenswrapper[4706]: I1206 05:47:17.256125 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c45f4d87f-7sd44" event={"ID":"d8a2aaf5-7417-43c4-9562-2df330329adf","Type":"ContainerStarted","Data":"1c4e4d17b1a7f95bbf7822631c5cc401943987a582ac64396d443801ebb19e31"}
Dec 06 05:47:17 crc kubenswrapper[4706]: I1206 05:47:17.257939 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" event={"ID":"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb","Type":"ContainerStarted","Data":"194625a5c5d0487f17a2cb5a08096844bbb56ebba4bc3708ca77c89ff6d83399"}
Dec 06 05:47:17 crc kubenswrapper[4706]: I1206 05:47:17.259029 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-789c5c5cb7-pt598"
Dec 06 05:47:17 crc kubenswrapper[4706]: I1206 05:47:17.261083 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-565566dfbd-5h6dj" event={"ID":"4e512237-f0a0-4312-900f-5f8cd066f34c","Type":"ContainerStarted","Data":"0fa82b6274e7874b77818c54894c836b4c1189bff7d2786933786e37292def0d"}
Dec 06 05:47:17 crc kubenswrapper[4706]: I1206 05:47:17.281333 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" podStartSLOduration=6.281314539 podStartE2EDuration="6.281314539s" podCreationTimestamp="2025-12-06 05:47:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:17.276486169 +0000 UTC m=+1659.604310133" watchObservedRunningTime="2025-12-06 05:47:17.281314539 +0000 UTC m=+1659.609138483"
Dec 06 05:47:17 crc kubenswrapper[4706]: I1206 05:47:17.454990 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.149:9311/healthcheck\": dial tcp 10.217.0.149:9311: connect: connection refused"
Dec 06 05:47:17 crc kubenswrapper[4706]: I1206 05:47:17.819079 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-5d57ffb9bb-t86s7" podUID="fb031ade-7dae-40f8-a748-8842d00f6a37" containerName="barbican-api-log" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 06 05:47:17 crc kubenswrapper[4706]: I1206 05:47:17.824430 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-5d57ffb9bb-t86s7" podUID="fb031ade-7dae-40f8-a748-8842d00f6a37" containerName="barbican-api" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 06 05:47:17 crc kubenswrapper[4706]: I1206 05:47:17.824672 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5d57ffb9bb-t86s7" podUID="fb031ade-7dae-40f8-a748-8842d00f6a37" containerName="barbican-api-log" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 06 05:47:17 crc kubenswrapper[4706]: I1206 05:47:17.837878 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5d57ffb9bb-t86s7" podUID="fb031ade-7dae-40f8-a748-8842d00f6a37" containerName="barbican-api" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Dec 06 05:47:18 crc kubenswrapper[4706]: I1206 05:47:18.268787 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5c45f4d87f-7sd44" event={"ID":"d8a2aaf5-7417-43c4-9562-2df330329adf","Type":"ContainerStarted","Data":"caf09b9c3993b2f259e38c2c51bf72e79a92d68cf7373d226ee0636396214ee5"}
Dec 06 05:47:18 crc kubenswrapper[4706]: I1206 05:47:18.269559 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:18 crc kubenswrapper[4706]: I1206 05:47:18.270562 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-565566dfbd-5h6dj" event={"ID":"4e512237-f0a0-4312-900f-5f8cd066f34c","Type":"ContainerStarted","Data":"31d2454ebb8f95d79b52c6dcf0f9c935403cc845a8f5603a0da4e80479ae77e6"}
Dec 06 05:47:18 crc kubenswrapper[4706]: I1206 05:47:18.294783 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5c45f4d87f-7sd44" podStartSLOduration=4.294746376 podStartE2EDuration="4.294746376s" podCreationTimestamp="2025-12-06 05:47:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:18.284582842 +0000 UTC m=+1660.612406776" watchObservedRunningTime="2025-12-06 05:47:18.294746376 +0000 UTC m=+1660.622570330"
Dec 06 05:47:18 crc kubenswrapper[4706]: I1206 05:47:18.322398 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-565566dfbd-5h6dj" podStartSLOduration=7.322379072 podStartE2EDuration="7.322379072s" podCreationTimestamp="2025-12-06 05:47:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:18.317321235 +0000 UTC m=+1660.645145179" watchObservedRunningTime="2025-12-06 05:47:18.322379072 +0000 UTC m=+1660.650203036"
Dec 06 05:47:18 crc kubenswrapper[4706]: I1206 05:47:18.455730 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.149:9311/healthcheck\": dial tcp 10.217.0.149:9311: connect: connection refused"
Dec 06 05:47:18 crc kubenswrapper[4706]: I1206 05:47:18.985560 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-7f979b84f6-hzq85" podUID="ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.142:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.142:8443: connect: connection refused"
Dec 06 05:47:19 crc kubenswrapper[4706]: I1206 05:47:19.278496 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-565566dfbd-5h6dj"
Dec 06 05:47:19 crc kubenswrapper[4706]: I1206 05:47:19.993148 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"]
Dec 06 05:47:19 crc kubenswrapper[4706]: I1206 05:47:19.994616 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Dec 06 05:47:19 crc kubenswrapper[4706]: I1206 05:47:19.997235 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret"
Dec 06 05:47:19 crc kubenswrapper[4706]: I1206 05:47:19.997461 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config"
Dec 06 05:47:19 crc kubenswrapper[4706]: I1206 05:47:19.997625 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-g5t8r"
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.005374 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.140668 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2d5b5a38-b853-47de-ada1-1d7c240e84e4-openstack-config-secret\") pod \"openstackclient\" (UID: \"2d5b5a38-b853-47de-ada1-1d7c240e84e4\") " pod="openstack/openstackclient"
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.140724 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nthl\" (UniqueName: \"kubernetes.io/projected/2d5b5a38-b853-47de-ada1-1d7c240e84e4-kube-api-access-4nthl\") pod \"openstackclient\" (UID: \"2d5b5a38-b853-47de-ada1-1d7c240e84e4\") " pod="openstack/openstackclient"
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.140775 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d5b5a38-b853-47de-ada1-1d7c240e84e4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"2d5b5a38-b853-47de-ada1-1d7c240e84e4\") " pod="openstack/openstackclient"
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.140804 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2d5b5a38-b853-47de-ada1-1d7c240e84e4-openstack-config\") pod \"openstackclient\" (UID: \"2d5b5a38-b853-47de-ada1-1d7c240e84e4\") " pod="openstack/openstackclient"
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.230750 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5d57ffb9bb-t86s7"
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.243134 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2d5b5a38-b853-47de-ada1-1d7c240e84e4-openstack-config-secret\") pod \"openstackclient\" (UID: \"2d5b5a38-b853-47de-ada1-1d7c240e84e4\") " pod="openstack/openstackclient"
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.243207 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nthl\" (UniqueName: \"kubernetes.io/projected/2d5b5a38-b853-47de-ada1-1d7c240e84e4-kube-api-access-4nthl\") pod \"openstackclient\" (UID: \"2d5b5a38-b853-47de-ada1-1d7c240e84e4\") " pod="openstack/openstackclient"
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.243245 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d5b5a38-b853-47de-ada1-1d7c240e84e4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"2d5b5a38-b853-47de-ada1-1d7c240e84e4\") " pod="openstack/openstackclient"
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.243271 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2d5b5a38-b853-47de-ada1-1d7c240e84e4-openstack-config\") pod \"openstackclient\" (UID: \"2d5b5a38-b853-47de-ada1-1d7c240e84e4\") " pod="openstack/openstackclient"
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.244376 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/2d5b5a38-b853-47de-ada1-1d7c240e84e4-openstack-config\") pod \"openstackclient\" (UID: \"2d5b5a38-b853-47de-ada1-1d7c240e84e4\") " pod="openstack/openstackclient"
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.254672 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5d57ffb9bb-t86s7"
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.255997 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2d5b5a38-b853-47de-ada1-1d7c240e84e4-combined-ca-bundle\") pod \"openstackclient\" (UID: \"2d5b5a38-b853-47de-ada1-1d7c240e84e4\") " pod="openstack/openstackclient"
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.258916 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/2d5b5a38-b853-47de-ada1-1d7c240e84e4-openstack-config-secret\") pod \"openstackclient\" (UID: \"2d5b5a38-b853-47de-ada1-1d7c240e84e4\") " pod="openstack/openstackclient"
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.264592 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nthl\" (UniqueName: \"kubernetes.io/projected/2d5b5a38-b853-47de-ada1-1d7c240e84e4-kube-api-access-4nthl\") pod \"openstackclient\" (UID: \"2d5b5a38-b853-47de-ada1-1d7c240e84e4\") " pod="openstack/openstackclient"
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.292860 4706 generic.go:334] "Generic (PLEG): container finished" podID="7bec3465-219b-4c57-83a9-aed4c78d1483" containerID="8ac0b108392f0f9075ab778bb7034eb663c81084180d87d2ff18e5f093aadb51" exitCode=0
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.293789 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-csbkx" event={"ID":"7bec3465-219b-4c57-83a9-aed4c78d1483","Type":"ContainerDied","Data":"8ac0b108392f0f9075ab778bb7034eb663c81084180d87d2ff18e5f093aadb51"}
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.316885 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.332003 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5d9ccff6c8-kmw5z"]
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.332260 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" containerID="cri-o://c65633ba26c87a617f75a17a16ddec504b12ea1e7b16c260be02e0e4178d401e" gracePeriod=30
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.333237 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.149:9311/healthcheck\": dial tcp 10.217.0.149:9311: connect: connection refused"
Dec 06 05:47:20 crc kubenswrapper[4706]: I1206 05:47:20.934300 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Dec 06 05:47:20 crc kubenswrapper[4706]: W1206 05:47:20.936699 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2d5b5a38_b853_47de_ada1_1d7c240e84e4.slice/crio-51368f315c914c418c515b374187c25f192783659ddd8d319f5f74ee0b387888 WatchSource:0}: Error finding container 51368f315c914c418c515b374187c25f192783659ddd8d319f5f74ee0b387888: Status 404 returned error can't find the container with id 51368f315c914c418c515b374187c25f192783659ddd8d319f5f74ee0b387888
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.312128 4706 generic.go:334] "Generic (PLEG): container finished" podID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerID="c65633ba26c87a617f75a17a16ddec504b12ea1e7b16c260be02e0e4178d401e" exitCode=143
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.312224 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" event={"ID":"687c17f9-1a75-4b68-8d97-1285ad8f9e3d","Type":"ContainerDied","Data":"c65633ba26c87a617f75a17a16ddec504b12ea1e7b16c260be02e0e4178d401e"}
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.312464 4706 scope.go:117] "RemoveContainer" containerID="26ad472cbd33633f7af1b4c342cc43095dec2824c00ca1a73fbc5510bcad3d5c"
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.323563 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"2d5b5a38-b853-47de-ada1-1d7c240e84e4","Type":"ContainerStarted","Data":"51368f315c914c418c515b374187c25f192783659ddd8d319f5f74ee0b387888"}
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.501028 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5d9ccff6c8-kmw5z"
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.704795 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mpxf4\" (UniqueName: \"kubernetes.io/projected/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-kube-api-access-mpxf4\") pod \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") "
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.704977 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-combined-ca-bundle\") pod \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") "
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.705078 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-config-data\") pod \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") "
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.705245 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-logs\") pod \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") "
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.705325 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-config-data-custom\") pod \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\" (UID: \"687c17f9-1a75-4b68-8d97-1285ad8f9e3d\") "
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.705695 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-logs" (OuterVolumeSpecName: "logs") pod "687c17f9-1a75-4b68-8d97-1285ad8f9e3d" (UID: "687c17f9-1a75-4b68-8d97-1285ad8f9e3d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.706336 4706 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-logs\") on node \"crc\" DevicePath \"\""
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.718801 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "687c17f9-1a75-4b68-8d97-1285ad8f9e3d" (UID: "687c17f9-1a75-4b68-8d97-1285ad8f9e3d"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.720160 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-kube-api-access-mpxf4" (OuterVolumeSpecName: "kube-api-access-mpxf4") pod "687c17f9-1a75-4b68-8d97-1285ad8f9e3d" (UID: "687c17f9-1a75-4b68-8d97-1285ad8f9e3d"). InnerVolumeSpecName "kube-api-access-mpxf4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.720258 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-789c5c5cb7-pt598"
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.727078 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-csbkx"
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.789665 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-config-data" (OuterVolumeSpecName: "config-data") pod "687c17f9-1a75-4b68-8d97-1285ad8f9e3d" (UID: "687c17f9-1a75-4b68-8d97-1285ad8f9e3d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.793767 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "687c17f9-1a75-4b68-8d97-1285ad8f9e3d" (UID: "687c17f9-1a75-4b68-8d97-1285ad8f9e3d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.798187 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7979dc8455-v2sfb"]
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.799070 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" podUID="c6cd9b30-8090-48ed-9c45-bca903c380ee" containerName="dnsmasq-dns" containerID="cri-o://c966c6915766bbd5c74efbdfdc4abf449953f6c9c4efaa5df3e90b5b349cf4b8" gracePeriod=10
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.809040 4706 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-config-data-custom\") on node \"crc\" DevicePath \"\""
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.809103 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mpxf4\" (UniqueName: \"kubernetes.io/projected/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-kube-api-access-mpxf4\") on node \"crc\" DevicePath \"\""
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.809119 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.809133 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/687c17f9-1a75-4b68-8d97-1285ad8f9e3d-config-data\") on node \"crc\" DevicePath \"\""
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.910017 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-config-data\") pod \"7bec3465-219b-4c57-83a9-aed4c78d1483\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") "
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.910158 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-scripts\") pod \"7bec3465-219b-4c57-83a9-aed4c78d1483\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") "
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.910191 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8z98j\" (UniqueName: \"kubernetes.io/projected/7bec3465-219b-4c57-83a9-aed4c78d1483-kube-api-access-8z98j\") pod \"7bec3465-219b-4c57-83a9-aed4c78d1483\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") "
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.910244 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-combined-ca-bundle\") pod \"7bec3465-219b-4c57-83a9-aed4c78d1483\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") "
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.910327 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-db-sync-config-data\") pod \"7bec3465-219b-4c57-83a9-aed4c78d1483\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") "
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.910439 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7bec3465-219b-4c57-83a9-aed4c78d1483-etc-machine-id\") pod \"7bec3465-219b-4c57-83a9-aed4c78d1483\" (UID: \"7bec3465-219b-4c57-83a9-aed4c78d1483\") "
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.911796 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/7bec3465-219b-4c57-83a9-aed4c78d1483-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "7bec3465-219b-4c57-83a9-aed4c78d1483" (UID: "7bec3465-219b-4c57-83a9-aed4c78d1483"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.919567 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-scripts" (OuterVolumeSpecName: "scripts") pod "7bec3465-219b-4c57-83a9-aed4c78d1483" (UID: "7bec3465-219b-4c57-83a9-aed4c78d1483"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.919624 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bec3465-219b-4c57-83a9-aed4c78d1483-kube-api-access-8z98j" (OuterVolumeSpecName: "kube-api-access-8z98j") pod "7bec3465-219b-4c57-83a9-aed4c78d1483" (UID: "7bec3465-219b-4c57-83a9-aed4c78d1483"). InnerVolumeSpecName "kube-api-access-8z98j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.920903 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "7bec3465-219b-4c57-83a9-aed4c78d1483" (UID: "7bec3465-219b-4c57-83a9-aed4c78d1483"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.943953 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7bec3465-219b-4c57-83a9-aed4c78d1483" (UID: "7bec3465-219b-4c57-83a9-aed4c78d1483"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:47:21 crc kubenswrapper[4706]: I1206 05:47:21.972330 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-config-data" (OuterVolumeSpecName: "config-data") pod "7bec3465-219b-4c57-83a9-aed4c78d1483" (UID: "7bec3465-219b-4c57-83a9-aed4c78d1483"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.013623 4706 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/7bec3465-219b-4c57-83a9-aed4c78d1483-etc-machine-id\") on node \"crc\" DevicePath \"\""
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.013692 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-config-data\") on node \"crc\" DevicePath \"\""
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.013705 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-scripts\") on node \"crc\" DevicePath \"\""
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.013717 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8z98j\" (UniqueName: \"kubernetes.io/projected/7bec3465-219b-4c57-83a9-aed4c78d1483-kube-api-access-8z98j\") on node \"crc\" DevicePath \"\""
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.013733 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.013774 4706 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7bec3465-219b-4c57-83a9-aed4c78d1483-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.369765 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7979dc8455-v2sfb"
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.370391 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5d9ccff6c8-kmw5z"
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.370454 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5d9ccff6c8-kmw5z" event={"ID":"687c17f9-1a75-4b68-8d97-1285ad8f9e3d","Type":"ContainerDied","Data":"b9cc647db6f664a9ccb5e207dea2d75eaa1d359175b77700488d27e41f545fcc"}
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.371424 4706 scope.go:117] "RemoveContainer" containerID="c65633ba26c87a617f75a17a16ddec504b12ea1e7b16c260be02e0e4178d401e"
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.383036 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-csbkx" event={"ID":"7bec3465-219b-4c57-83a9-aed4c78d1483","Type":"ContainerDied","Data":"ba7f91f7348165896fcb59713b39f97a573a22dc1b6f9d23f19257731a925fc2"}
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.383089 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ba7f91f7348165896fcb59713b39f97a573a22dc1b6f9d23f19257731a925fc2"
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.383174 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-csbkx"
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.399461 4706 generic.go:334] "Generic (PLEG): container finished" podID="c6cd9b30-8090-48ed-9c45-bca903c380ee" containerID="c966c6915766bbd5c74efbdfdc4abf449953f6c9c4efaa5df3e90b5b349cf4b8" exitCode=0
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.399509 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" event={"ID":"c6cd9b30-8090-48ed-9c45-bca903c380ee","Type":"ContainerDied","Data":"c966c6915766bbd5c74efbdfdc4abf449953f6c9c4efaa5df3e90b5b349cf4b8"}
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.399533 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7979dc8455-v2sfb" event={"ID":"c6cd9b30-8090-48ed-9c45-bca903c380ee","Type":"ContainerDied","Data":"2805bc2829a6baf80e9716c9962a9741b8f536bd6a50339f4af0dec45e48e405"}
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.399635 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7979dc8455-v2sfb"
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.416469 4706 scope.go:117] "RemoveContainer" containerID="5c008e2e346c97d8437ddcf7143a68076040812b5dc1bc770005c634192c2853"
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.420340 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-config\") pod \"c6cd9b30-8090-48ed-9c45-bca903c380ee\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") "
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.420408 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-dns-swift-storage-0\") pod \"c6cd9b30-8090-48ed-9c45-bca903c380ee\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") "
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.420498 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-ovsdbserver-nb\") pod \"c6cd9b30-8090-48ed-9c45-bca903c380ee\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") "
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.420536 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-dns-svc\") pod \"c6cd9b30-8090-48ed-9c45-bca903c380ee\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") "
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.420621 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-ovsdbserver-sb\") pod \"c6cd9b30-8090-48ed-9c45-bca903c380ee\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") "
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.420649 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4hpzw\" (UniqueName: \"kubernetes.io/projected/c6cd9b30-8090-48ed-9c45-bca903c380ee-kube-api-access-4hpzw\") pod \"c6cd9b30-8090-48ed-9c45-bca903c380ee\" (UID: \"c6cd9b30-8090-48ed-9c45-bca903c380ee\") "
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.461311 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6cd9b30-8090-48ed-9c45-bca903c380ee-kube-api-access-4hpzw" (OuterVolumeSpecName: "kube-api-access-4hpzw") pod "c6cd9b30-8090-48ed-9c45-bca903c380ee" (UID: "c6cd9b30-8090-48ed-9c45-bca903c380ee"). InnerVolumeSpecName "kube-api-access-4hpzw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.469447 4706 scope.go:117] "RemoveContainer" containerID="c966c6915766bbd5c74efbdfdc4abf449953f6c9c4efaa5df3e90b5b349cf4b8"
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.508109 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5d9ccff6c8-kmw5z"]
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.517562 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-config" (OuterVolumeSpecName: "config") pod "c6cd9b30-8090-48ed-9c45-bca903c380ee" (UID: "c6cd9b30-8090-48ed-9c45-bca903c380ee"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.522359 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4hpzw\" (UniqueName: \"kubernetes.io/projected/c6cd9b30-8090-48ed-9c45-bca903c380ee-kube-api-access-4hpzw\") on node \"crc\" DevicePath \"\""
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.522393 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-config\") on node \"crc\" DevicePath \"\""
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.527724 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-5d9ccff6c8-kmw5z"]
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.570623 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c6cd9b30-8090-48ed-9c45-bca903c380ee" (UID: "c6cd9b30-8090-48ed-9c45-bca903c380ee"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.595577 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c6cd9b30-8090-48ed-9c45-bca903c380ee" (UID: "c6cd9b30-8090-48ed-9c45-bca903c380ee"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.595737 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c6cd9b30-8090-48ed-9c45-bca903c380ee" (UID: "c6cd9b30-8090-48ed-9c45-bca903c380ee"). InnerVolumeSpecName "dns-swift-storage-0".
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.628857 4706 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.628901 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.628915 4706 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.634454 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Dec 06 05:47:22 crc kubenswrapper[4706]: E1206 05:47:22.634942 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.634991 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api" Dec 06 05:47:22 crc kubenswrapper[4706]: E1206 05:47:22.635014 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.635024 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" Dec 06 05:47:22 crc kubenswrapper[4706]: E1206 05:47:22.635041 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.635068 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" Dec 06 05:47:22 crc kubenswrapper[4706]: E1206 05:47:22.635079 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.635088 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api" Dec 06 05:47:22 crc kubenswrapper[4706]: E1206 05:47:22.635100 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bec3465-219b-4c57-83a9-aed4c78d1483" containerName="cinder-db-sync" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.635107 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bec3465-219b-4c57-83a9-aed4c78d1483" containerName="cinder-db-sync" Dec 06 05:47:22 crc kubenswrapper[4706]: E1206 05:47:22.635126 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6cd9b30-8090-48ed-9c45-bca903c380ee" containerName="dnsmasq-dns" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.635133 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6cd9b30-8090-48ed-9c45-bca903c380ee" containerName="dnsmasq-dns" Dec 06 05:47:22 crc kubenswrapper[4706]: E1206 05:47:22.635155 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6cd9b30-8090-48ed-9c45-bca903c380ee" 
containerName="init" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.635162 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6cd9b30-8090-48ed-9c45-bca903c380ee" containerName="init" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.635364 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.635395 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bec3465-219b-4c57-83a9-aed4c78d1483" containerName="cinder-db-sync" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.635404 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.635420 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6cd9b30-8090-48ed-9c45-bca903c380ee" containerName="dnsmasq-dns" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.635436 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.641195 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" containerName="barbican-api-log" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.646483 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.654192 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c6cd9b30-8090-48ed-9c45-bca903c380ee" (UID: "c6cd9b30-8090-48ed-9c45-bca903c380ee"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.654549 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.654810 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.654933 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-8rgx9" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.655087 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.672562 4706 scope.go:117] "RemoveContainer" containerID="cd0c70c1440355130c89f7e9d6ac09b6d6f5d744a234851122b9e521f7dc6d95" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.681077 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.709080 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-95d56546f-jj74z"] Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.711231 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.731735 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c6cd9b30-8090-48ed-9c45-bca903c380ee-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.739804 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95d56546f-jj74z"] Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.783414 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7979dc8455-v2sfb"] Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.793422 4706 scope.go:117] "RemoveContainer" containerID="c966c6915766bbd5c74efbdfdc4abf449953f6c9c4efaa5df3e90b5b349cf4b8" Dec 06 05:47:22 crc kubenswrapper[4706]: E1206 05:47:22.795387 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c966c6915766bbd5c74efbdfdc4abf449953f6c9c4efaa5df3e90b5b349cf4b8\": container with ID starting with c966c6915766bbd5c74efbdfdc4abf449953f6c9c4efaa5df3e90b5b349cf4b8 not found: ID does not exist" containerID="c966c6915766bbd5c74efbdfdc4abf449953f6c9c4efaa5df3e90b5b349cf4b8" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.795444 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c966c6915766bbd5c74efbdfdc4abf449953f6c9c4efaa5df3e90b5b349cf4b8"} err="failed to get container status \"c966c6915766bbd5c74efbdfdc4abf449953f6c9c4efaa5df3e90b5b349cf4b8\": rpc error: code = NotFound desc = could not find container \"c966c6915766bbd5c74efbdfdc4abf449953f6c9c4efaa5df3e90b5b349cf4b8\": container with ID starting with c966c6915766bbd5c74efbdfdc4abf449953f6c9c4efaa5df3e90b5b349cf4b8 not found: ID does not exist" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.795481 4706 scope.go:117] "RemoveContainer" containerID="cd0c70c1440355130c89f7e9d6ac09b6d6f5d744a234851122b9e521f7dc6d95" Dec 06 05:47:22 crc kubenswrapper[4706]: E1206 05:47:22.796654 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cd0c70c1440355130c89f7e9d6ac09b6d6f5d744a234851122b9e521f7dc6d95\": container with ID starting with cd0c70c1440355130c89f7e9d6ac09b6d6f5d744a234851122b9e521f7dc6d95 not found: ID does not exist" containerID="cd0c70c1440355130c89f7e9d6ac09b6d6f5d744a234851122b9e521f7dc6d95" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.796705 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cd0c70c1440355130c89f7e9d6ac09b6d6f5d744a234851122b9e521f7dc6d95"} err="failed to get container status \"cd0c70c1440355130c89f7e9d6ac09b6d6f5d744a234851122b9e521f7dc6d95\": rpc error: code = NotFound desc = could not find container \"cd0c70c1440355130c89f7e9d6ac09b6d6f5d744a234851122b9e521f7dc6d95\": container with ID starting with cd0c70c1440355130c89f7e9d6ac09b6d6f5d744a234851122b9e521f7dc6d95 not found: ID does not exist" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.808756 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.811548 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.813447 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.829587 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7979dc8455-v2sfb"] Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.835208 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.838889 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvnfx\" (UniqueName: \"kubernetes.io/projected/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-kube-api-access-lvnfx\") pod \"dnsmasq-dns-95d56546f-jj74z\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.839034 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-logs\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.839197 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-scripts\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.841571 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwv8s\" (UniqueName: \"kubernetes.io/projected/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-kube-api-access-nwv8s\") pod \"cinder-scheduler-0\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.841679 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-dns-svc\") pod \"dnsmasq-dns-95d56546f-jj74z\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.841773 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.841848 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.841944 4706 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-ovsdbserver-sb\") pod \"dnsmasq-dns-95d56546f-jj74z\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.842070 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-scripts\") pod \"cinder-scheduler-0\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.842162 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-config-data\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.842240 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-config-data-custom\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.842429 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-config-data\") pod \"cinder-scheduler-0\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.842483 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-ovsdbserver-nb\") pod \"dnsmasq-dns-95d56546f-jj74z\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.842689 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-config\") pod \"dnsmasq-dns-95d56546f-jj74z\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.842729 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-dns-swift-storage-0\") pod \"dnsmasq-dns-95d56546f-jj74z\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.842810 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gw6kw\" (UniqueName: \"kubernetes.io/projected/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-kube-api-access-gw6kw\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.842874 4706 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.842947 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.847835 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.944146 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-config\") pod \"dnsmasq-dns-95d56546f-jj74z\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.944363 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-dns-swift-storage-0\") pod \"dnsmasq-dns-95d56546f-jj74z\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.944443 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gw6kw\" (UniqueName: \"kubernetes.io/projected/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-kube-api-access-gw6kw\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.944600 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.944634 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.944650 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.944675 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvnfx\" (UniqueName: \"kubernetes.io/projected/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-kube-api-access-lvnfx\") pod \"dnsmasq-dns-95d56546f-jj74z\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.944690 4706 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-logs\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.944731 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-scripts\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.944748 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwv8s\" (UniqueName: \"kubernetes.io/projected/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-kube-api-access-nwv8s\") pod \"cinder-scheduler-0\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.944764 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-dns-svc\") pod \"dnsmasq-dns-95d56546f-jj74z\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.944786 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.944802 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.944817 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-ovsdbserver-sb\") pod \"dnsmasq-dns-95d56546f-jj74z\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.944841 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-scripts\") pod \"cinder-scheduler-0\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.944863 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-config-data\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.944881 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-config-data-custom\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 
05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.944903 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-config-data\") pod \"cinder-scheduler-0\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.944921 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-ovsdbserver-nb\") pod \"dnsmasq-dns-95d56546f-jj74z\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.945726 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-ovsdbserver-nb\") pod \"dnsmasq-dns-95d56546f-jj74z\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.946253 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-config\") pod \"dnsmasq-dns-95d56546f-jj74z\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.946748 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-dns-swift-storage-0\") pod \"dnsmasq-dns-95d56546f-jj74z\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.947012 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.950350 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-logs\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.950587 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.950708 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-etc-machine-id\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.951321 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-ovsdbserver-sb\") pod 
\"dnsmasq-dns-95d56546f-jj74z\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.952508 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.953661 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.953988 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-dns-svc\") pod \"dnsmasq-dns-95d56546f-jj74z\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.954586 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-scripts\") pod \"cinder-scheduler-0\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.956206 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-scripts\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.956982 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-config-data\") pod \"cinder-scheduler-0\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.960375 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-config-data\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.962943 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-config-data-custom\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.968499 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvnfx\" (UniqueName: \"kubernetes.io/projected/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-kube-api-access-lvnfx\") pod \"dnsmasq-dns-95d56546f-jj74z\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.973226 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwv8s\" (UniqueName: 
\"kubernetes.io/projected/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-kube-api-access-nwv8s\") pod \"cinder-scheduler-0\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.973353 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gw6kw\" (UniqueName: \"kubernetes.io/projected/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-kube-api-access-gw6kw\") pod \"cinder-api-0\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " pod="openstack/cinder-api-0" Dec 06 05:47:22 crc kubenswrapper[4706]: I1206 05:47:22.981500 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 06 05:47:23 crc kubenswrapper[4706]: I1206 05:47:23.052346 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:23 crc kubenswrapper[4706]: I1206 05:47:23.160140 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Dec 06 05:47:23 crc kubenswrapper[4706]: I1206 05:47:23.521354 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 06 05:47:23 crc kubenswrapper[4706]: I1206 05:47:23.691794 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95d56546f-jj74z"] Dec 06 05:47:23 crc kubenswrapper[4706]: W1206 05:47:23.714864 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode10b1414_cac8_4d46_a7e8_dfa6c978d13c.slice/crio-4105b2e195a2567e6db64bce4cb0bb0032888e9830aab8e0fe68e5d5c77c0429 WatchSource:0}: Error finding container 4105b2e195a2567e6db64bce4cb0bb0032888e9830aab8e0fe68e5d5c77c0429: Status 404 returned error can't find the container with id 4105b2e195a2567e6db64bce4cb0bb0032888e9830aab8e0fe68e5d5c77c0429 Dec 06 05:47:23 crc kubenswrapper[4706]: I1206 05:47:23.801165 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 06 05:47:23 crc kubenswrapper[4706]: W1206 05:47:23.809525 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda9d9025f_cc6c_42ed_b686_ce0de76d83c7.slice/crio-6b5f8831a3b670d40796d9f0db525c83971d45787e4c94c0dd6d1369d13dd2d8 WatchSource:0}: Error finding container 6b5f8831a3b670d40796d9f0db525c83971d45787e4c94c0dd6d1369d13dd2d8: Status 404 returned error can't find the container with id 6b5f8831a3b670d40796d9f0db525c83971d45787e4c94c0dd6d1369d13dd2d8 Dec 06 05:47:24 crc kubenswrapper[4706]: I1206 05:47:24.049341 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="687c17f9-1a75-4b68-8d97-1285ad8f9e3d" path="/var/lib/kubelet/pods/687c17f9-1a75-4b68-8d97-1285ad8f9e3d/volumes" Dec 06 05:47:24 crc kubenswrapper[4706]: I1206 05:47:24.050159 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6cd9b30-8090-48ed-9c45-bca903c380ee" path="/var/lib/kubelet/pods/c6cd9b30-8090-48ed-9c45-bca903c380ee/volumes" Dec 06 05:47:24 crc kubenswrapper[4706]: I1206 05:47:24.449936 4706 generic.go:334] "Generic (PLEG): container finished" podID="e10b1414-cac8-4d46-a7e8-dfa6c978d13c" containerID="389b77c493421fddd9ce10f09ad4240c963133ae19f05eb0ec099dc78e449b0e" exitCode=0 Dec 06 05:47:24 crc kubenswrapper[4706]: I1206 05:47:24.450146 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95d56546f-jj74z" 
event={"ID":"e10b1414-cac8-4d46-a7e8-dfa6c978d13c","Type":"ContainerDied","Data":"389b77c493421fddd9ce10f09ad4240c963133ae19f05eb0ec099dc78e449b0e"} Dec 06 05:47:24 crc kubenswrapper[4706]: I1206 05:47:24.455388 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95d56546f-jj74z" event={"ID":"e10b1414-cac8-4d46-a7e8-dfa6c978d13c","Type":"ContainerStarted","Data":"4105b2e195a2567e6db64bce4cb0bb0032888e9830aab8e0fe68e5d5c77c0429"} Dec 06 05:47:24 crc kubenswrapper[4706]: I1206 05:47:24.526150 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a9d9025f-cc6c-42ed-b686-ce0de76d83c7","Type":"ContainerStarted","Data":"6b5f8831a3b670d40796d9f0db525c83971d45787e4c94c0dd6d1369d13dd2d8"} Dec 06 05:47:24 crc kubenswrapper[4706]: I1206 05:47:24.536588 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1a29d8d3-8bd3-493f-b40c-89104b4d3a02","Type":"ContainerStarted","Data":"51ed28cd2c78cf796733e4289cc238737fc9797d3a28f59bc7191f8d364e2f8e"} Dec 06 05:47:24 crc kubenswrapper[4706]: I1206 05:47:24.946677 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.099062 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-7f666db4c-wsc2b"] Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.105796 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.111468 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.111690 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.111798 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.130708 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7f666db4c-wsc2b"] Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.223813 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/38ce5378-a514-4454-8f74-73226df682e6-public-tls-certs\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.224174 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/38ce5378-a514-4454-8f74-73226df682e6-internal-tls-certs\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.224207 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2586d\" (UniqueName: \"kubernetes.io/projected/38ce5378-a514-4454-8f74-73226df682e6-kube-api-access-2586d\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.224232 4706 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/38ce5378-a514-4454-8f74-73226df682e6-etc-swift\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.224273 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38ce5378-a514-4454-8f74-73226df682e6-log-httpd\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.224305 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38ce5378-a514-4454-8f74-73226df682e6-run-httpd\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.224332 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38ce5378-a514-4454-8f74-73226df682e6-combined-ca-bundle\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.224400 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38ce5378-a514-4454-8f74-73226df682e6-config-data\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.326544 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38ce5378-a514-4454-8f74-73226df682e6-config-data\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.326655 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/38ce5378-a514-4454-8f74-73226df682e6-public-tls-certs\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.326681 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/38ce5378-a514-4454-8f74-73226df682e6-internal-tls-certs\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.326702 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2586d\" (UniqueName: \"kubernetes.io/projected/38ce5378-a514-4454-8f74-73226df682e6-kube-api-access-2586d\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.326721 4706 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/38ce5378-a514-4454-8f74-73226df682e6-etc-swift\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.326746 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38ce5378-a514-4454-8f74-73226df682e6-log-httpd\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.327475 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38ce5378-a514-4454-8f74-73226df682e6-run-httpd\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.327559 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38ce5378-a514-4454-8f74-73226df682e6-combined-ca-bundle\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.327656 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38ce5378-a514-4454-8f74-73226df682e6-run-httpd\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.327835 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/38ce5378-a514-4454-8f74-73226df682e6-log-httpd\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.331803 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/38ce5378-a514-4454-8f74-73226df682e6-public-tls-certs\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.333292 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/38ce5378-a514-4454-8f74-73226df682e6-internal-tls-certs\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.335880 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/38ce5378-a514-4454-8f74-73226df682e6-etc-swift\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.336707 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/38ce5378-a514-4454-8f74-73226df682e6-config-data\") pod 
\"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.349752 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2586d\" (UniqueName: \"kubernetes.io/projected/38ce5378-a514-4454-8f74-73226df682e6-kube-api-access-2586d\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.349817 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/38ce5378-a514-4454-8f74-73226df682e6-combined-ca-bundle\") pod \"swift-proxy-7f666db4c-wsc2b\" (UID: \"38ce5378-a514-4454-8f74-73226df682e6\") " pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.430463 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.615334 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a9d9025f-cc6c-42ed-b686-ce0de76d83c7","Type":"ContainerStarted","Data":"9afa769327a265fa2e9d6d4d07cfa43c0d7e5871a28779f0839cae24c20a00ea"} Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.630602 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1a29d8d3-8bd3-493f-b40c-89104b4d3a02","Type":"ContainerStarted","Data":"bbcda477d3fd9a6ff83b8f8fdf3485c2cf6162604ff6d9989953a968d572e160"} Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.647990 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95d56546f-jj74z" event={"ID":"e10b1414-cac8-4d46-a7e8-dfa6c978d13c","Type":"ContainerStarted","Data":"7f03325c628ee98cccae8880fff9aa3524dbf4c1978edae9de916a75ce9bab36"} Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.648332 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:25 crc kubenswrapper[4706]: I1206 05:47:25.686419 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-95d56546f-jj74z" podStartSLOduration=3.686394052 podStartE2EDuration="3.686394052s" podCreationTimestamp="2025-12-06 05:47:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:25.676883684 +0000 UTC m=+1668.004707638" watchObservedRunningTime="2025-12-06 05:47:25.686394052 +0000 UTC m=+1668.014218006" Dec 06 05:47:26 crc kubenswrapper[4706]: I1206 05:47:26.222661 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7f666db4c-wsc2b"] Dec 06 05:47:26 crc kubenswrapper[4706]: W1206 05:47:26.266167 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod38ce5378_a514_4454_8f74_73226df682e6.slice/crio-65176fe0fcc27dd0101dba634d50af9b468bd946df179b0b94ab10c624b150d8 WatchSource:0}: Error finding container 65176fe0fcc27dd0101dba634d50af9b468bd946df179b0b94ab10c624b150d8: Status 404 returned error can't find the container with id 65176fe0fcc27dd0101dba634d50af9b468bd946df179b0b94ab10c624b150d8 Dec 06 05:47:26 crc kubenswrapper[4706]: I1206 05:47:26.666451 4706 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/swift-proxy-7f666db4c-wsc2b" event={"ID":"38ce5378-a514-4454-8f74-73226df682e6","Type":"ContainerStarted","Data":"b511dd8c30102217cdc7f171c5c0a40ee40e95c090ce70abf1c4c091e749e99b"} Dec 06 05:47:26 crc kubenswrapper[4706]: I1206 05:47:26.666822 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7f666db4c-wsc2b" event={"ID":"38ce5378-a514-4454-8f74-73226df682e6","Type":"ContainerStarted","Data":"65176fe0fcc27dd0101dba634d50af9b468bd946df179b0b94ab10c624b150d8"} Dec 06 05:47:26 crc kubenswrapper[4706]: I1206 05:47:26.678951 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a9d9025f-cc6c-42ed-b686-ce0de76d83c7","Type":"ContainerStarted","Data":"fe5d371e99d2107ee0c244ad9a346c079ffc9e36e36fb6820a1b4bd34f0a6ce7"} Dec 06 05:47:26 crc kubenswrapper[4706]: I1206 05:47:26.679402 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Dec 06 05:47:26 crc kubenswrapper[4706]: I1206 05:47:26.679021 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="a9d9025f-cc6c-42ed-b686-ce0de76d83c7" containerName="cinder-api-log" containerID="cri-o://9afa769327a265fa2e9d6d4d07cfa43c0d7e5871a28779f0839cae24c20a00ea" gracePeriod=30 Dec 06 05:47:26 crc kubenswrapper[4706]: I1206 05:47:26.679420 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="a9d9025f-cc6c-42ed-b686-ce0de76d83c7" containerName="cinder-api" containerID="cri-o://fe5d371e99d2107ee0c244ad9a346c079ffc9e36e36fb6820a1b4bd34f0a6ce7" gracePeriod=30 Dec 06 05:47:26 crc kubenswrapper[4706]: I1206 05:47:26.703241 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:47:26 crc kubenswrapper[4706]: I1206 05:47:26.704111 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerName="ceilometer-central-agent" containerID="cri-o://b6c6b612946de87b2e9f02d1187cf03be5000e9780f622d1f881e47c6a22035f" gracePeriod=30 Dec 06 05:47:26 crc kubenswrapper[4706]: I1206 05:47:26.704630 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerName="proxy-httpd" containerID="cri-o://9133a43b784380a19e539aa68558dcfcb732f30a1f8ef969e7aba3929539c185" gracePeriod=30 Dec 06 05:47:26 crc kubenswrapper[4706]: I1206 05:47:26.704790 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerName="sg-core" containerID="cri-o://27a55dcd05469839e11680828d8c1a8f324b117e82c0fcd8e2ed43a4ad7bb8c7" gracePeriod=30 Dec 06 05:47:26 crc kubenswrapper[4706]: I1206 05:47:26.704891 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerName="ceilometer-notification-agent" containerID="cri-o://7481f637da8457d5258f019034cbd64b4fff7d392b0107982e7fb453ae34bc1a" gracePeriod=30 Dec 06 05:47:26 crc kubenswrapper[4706]: I1206 05:47:26.708298 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1a29d8d3-8bd3-493f-b40c-89104b4d3a02","Type":"ContainerStarted","Data":"c0613fb5cf6d378604fa3a0322447e04b36d8f54a054a76970c08d0d62e9b661"} Dec 06 05:47:26 crc 
kubenswrapper[4706]: I1206 05:47:26.733897 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.733876518 podStartE2EDuration="4.733876518s" podCreationTimestamp="2025-12-06 05:47:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:26.714359601 +0000 UTC m=+1669.042183545" watchObservedRunningTime="2025-12-06 05:47:26.733876518 +0000 UTC m=+1669.061700462" Dec 06 05:47:26 crc kubenswrapper[4706]: I1206 05:47:26.735263 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Dec 06 05:47:26 crc kubenswrapper[4706]: I1206 05:47:26.754038 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.147392376 podStartE2EDuration="4.754022743s" podCreationTimestamp="2025-12-06 05:47:22 +0000 UTC" firstStartedPulling="2025-12-06 05:47:23.526156186 +0000 UTC m=+1665.853980130" lastFinishedPulling="2025-12-06 05:47:24.132786553 +0000 UTC m=+1666.460610497" observedRunningTime="2025-12-06 05:47:26.751171325 +0000 UTC m=+1669.078995269" watchObservedRunningTime="2025-12-06 05:47:26.754022743 +0000 UTC m=+1669.081846687" Dec 06 05:47:26 crc kubenswrapper[4706]: I1206 05:47:26.855261 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-jtjm9" podUID="05c17326-c953-41d3-97ea-d620f5535013" containerName="registry-server" probeResult="failure" output=< Dec 06 05:47:26 crc kubenswrapper[4706]: timeout: failed to connect service ":50051" within 1s Dec 06 05:47:26 crc kubenswrapper[4706]: > Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.434587 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.588430 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-combined-ca-bundle\") pod \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.588489 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-logs\") pod \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.588518 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-scripts\") pod \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.588577 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-etc-machine-id\") pod \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.588647 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-config-data-custom\") pod \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.588844 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gw6kw\" (UniqueName: \"kubernetes.io/projected/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-kube-api-access-gw6kw\") pod \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.588882 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-config-data\") pod \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\" (UID: \"a9d9025f-cc6c-42ed-b686-ce0de76d83c7\") " Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.588988 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-logs" (OuterVolumeSpecName: "logs") pod "a9d9025f-cc6c-42ed-b686-ce0de76d83c7" (UID: "a9d9025f-cc6c-42ed-b686-ce0de76d83c7"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.589289 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "a9d9025f-cc6c-42ed-b686-ce0de76d83c7" (UID: "a9d9025f-cc6c-42ed-b686-ce0de76d83c7"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.589312 4706 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-logs\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.596535 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-scripts" (OuterVolumeSpecName: "scripts") pod "a9d9025f-cc6c-42ed-b686-ce0de76d83c7" (UID: "a9d9025f-cc6c-42ed-b686-ce0de76d83c7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.605277 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-kube-api-access-gw6kw" (OuterVolumeSpecName: "kube-api-access-gw6kw") pod "a9d9025f-cc6c-42ed-b686-ce0de76d83c7" (UID: "a9d9025f-cc6c-42ed-b686-ce0de76d83c7"). InnerVolumeSpecName "kube-api-access-gw6kw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.623250 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a9d9025f-cc6c-42ed-b686-ce0de76d83c7" (UID: "a9d9025f-cc6c-42ed-b686-ce0de76d83c7"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.649286 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a9d9025f-cc6c-42ed-b686-ce0de76d83c7" (UID: "a9d9025f-cc6c-42ed-b686-ce0de76d83c7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.687767 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-config-data" (OuterVolumeSpecName: "config-data") pod "a9d9025f-cc6c-42ed-b686-ce0de76d83c7" (UID: "a9d9025f-cc6c-42ed-b686-ce0de76d83c7"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.691034 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gw6kw\" (UniqueName: \"kubernetes.io/projected/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-kube-api-access-gw6kw\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.691074 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.691084 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.691092 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.691101 4706 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.691108 4706 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a9d9025f-cc6c-42ed-b686-ce0de76d83c7-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.725513 4706 generic.go:334] "Generic (PLEG): container finished" podID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerID="9133a43b784380a19e539aa68558dcfcb732f30a1f8ef969e7aba3929539c185" exitCode=0 Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.725546 4706 generic.go:334] "Generic (PLEG): container finished" podID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerID="27a55dcd05469839e11680828d8c1a8f324b117e82c0fcd8e2ed43a4ad7bb8c7" exitCode=2 Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.725556 4706 generic.go:334] "Generic (PLEG): container finished" podID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerID="b6c6b612946de87b2e9f02d1187cf03be5000e9780f622d1f881e47c6a22035f" exitCode=0 Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.725711 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2df640a0-8f4d-4743-84f9-32a9e187d282","Type":"ContainerDied","Data":"9133a43b784380a19e539aa68558dcfcb732f30a1f8ef969e7aba3929539c185"} Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.725738 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2df640a0-8f4d-4743-84f9-32a9e187d282","Type":"ContainerDied","Data":"27a55dcd05469839e11680828d8c1a8f324b117e82c0fcd8e2ed43a4ad7bb8c7"} Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.725750 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2df640a0-8f4d-4743-84f9-32a9e187d282","Type":"ContainerDied","Data":"b6c6b612946de87b2e9f02d1187cf03be5000e9780f622d1f881e47c6a22035f"} Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.738294 4706 generic.go:334] "Generic (PLEG): container finished" podID="a9d9025f-cc6c-42ed-b686-ce0de76d83c7" 
containerID="fe5d371e99d2107ee0c244ad9a346c079ffc9e36e36fb6820a1b4bd34f0a6ce7" exitCode=0 Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.738342 4706 generic.go:334] "Generic (PLEG): container finished" podID="a9d9025f-cc6c-42ed-b686-ce0de76d83c7" containerID="9afa769327a265fa2e9d6d4d07cfa43c0d7e5871a28779f0839cae24c20a00ea" exitCode=143 Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.738414 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.738426 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a9d9025f-cc6c-42ed-b686-ce0de76d83c7","Type":"ContainerDied","Data":"fe5d371e99d2107ee0c244ad9a346c079ffc9e36e36fb6820a1b4bd34f0a6ce7"} Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.738709 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a9d9025f-cc6c-42ed-b686-ce0de76d83c7","Type":"ContainerDied","Data":"9afa769327a265fa2e9d6d4d07cfa43c0d7e5871a28779f0839cae24c20a00ea"} Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.738740 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"a9d9025f-cc6c-42ed-b686-ce0de76d83c7","Type":"ContainerDied","Data":"6b5f8831a3b670d40796d9f0db525c83971d45787e4c94c0dd6d1369d13dd2d8"} Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.738760 4706 scope.go:117] "RemoveContainer" containerID="fe5d371e99d2107ee0c244ad9a346c079ffc9e36e36fb6820a1b4bd34f0a6ce7" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.750798 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7f666db4c-wsc2b" event={"ID":"38ce5378-a514-4454-8f74-73226df682e6","Type":"ContainerStarted","Data":"d47e6ac75f17d937931313748df7397d248a1f375ffa881a1a72a1fd289867df"} Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.752450 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.752720 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.758199 4706 generic.go:334] "Generic (PLEG): container finished" podID="bc067583-4394-4fa3-86fc-d6e626ec0f18" containerID="f4c3a5a604c3d6b739b1cdb3098d206b09058eebb8b42cf5898c707a28617b45" exitCode=0 Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.758268 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-pjt6m" event={"ID":"bc067583-4394-4fa3-86fc-d6e626ec0f18","Type":"ContainerDied","Data":"f4c3a5a604c3d6b739b1cdb3098d206b09058eebb8b42cf5898c707a28617b45"} Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.783574 4706 scope.go:117] "RemoveContainer" containerID="9afa769327a265fa2e9d6d4d07cfa43c0d7e5871a28779f0839cae24c20a00ea" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.791125 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-7f666db4c-wsc2b" podStartSLOduration=2.791091577 podStartE2EDuration="2.791091577s" podCreationTimestamp="2025-12-06 05:47:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:27.784073858 +0000 UTC m=+1670.111897822" watchObservedRunningTime="2025-12-06 05:47:27.791091577 +0000 UTC 
m=+1670.118915521" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.811549 4706 scope.go:117] "RemoveContainer" containerID="fe5d371e99d2107ee0c244ad9a346c079ffc9e36e36fb6820a1b4bd34f0a6ce7" Dec 06 05:47:27 crc kubenswrapper[4706]: E1206 05:47:27.812121 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe5d371e99d2107ee0c244ad9a346c079ffc9e36e36fb6820a1b4bd34f0a6ce7\": container with ID starting with fe5d371e99d2107ee0c244ad9a346c079ffc9e36e36fb6820a1b4bd34f0a6ce7 not found: ID does not exist" containerID="fe5d371e99d2107ee0c244ad9a346c079ffc9e36e36fb6820a1b4bd34f0a6ce7" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.812160 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe5d371e99d2107ee0c244ad9a346c079ffc9e36e36fb6820a1b4bd34f0a6ce7"} err="failed to get container status \"fe5d371e99d2107ee0c244ad9a346c079ffc9e36e36fb6820a1b4bd34f0a6ce7\": rpc error: code = NotFound desc = could not find container \"fe5d371e99d2107ee0c244ad9a346c079ffc9e36e36fb6820a1b4bd34f0a6ce7\": container with ID starting with fe5d371e99d2107ee0c244ad9a346c079ffc9e36e36fb6820a1b4bd34f0a6ce7 not found: ID does not exist" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.812186 4706 scope.go:117] "RemoveContainer" containerID="9afa769327a265fa2e9d6d4d07cfa43c0d7e5871a28779f0839cae24c20a00ea" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.816110 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Dec 06 05:47:27 crc kubenswrapper[4706]: E1206 05:47:27.816064 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9afa769327a265fa2e9d6d4d07cfa43c0d7e5871a28779f0839cae24c20a00ea\": container with ID starting with 9afa769327a265fa2e9d6d4d07cfa43c0d7e5871a28779f0839cae24c20a00ea not found: ID does not exist" containerID="9afa769327a265fa2e9d6d4d07cfa43c0d7e5871a28779f0839cae24c20a00ea" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.816588 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9afa769327a265fa2e9d6d4d07cfa43c0d7e5871a28779f0839cae24c20a00ea"} err="failed to get container status \"9afa769327a265fa2e9d6d4d07cfa43c0d7e5871a28779f0839cae24c20a00ea\": rpc error: code = NotFound desc = could not find container \"9afa769327a265fa2e9d6d4d07cfa43c0d7e5871a28779f0839cae24c20a00ea\": container with ID starting with 9afa769327a265fa2e9d6d4d07cfa43c0d7e5871a28779f0839cae24c20a00ea not found: ID does not exist" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.816624 4706 scope.go:117] "RemoveContainer" containerID="fe5d371e99d2107ee0c244ad9a346c079ffc9e36e36fb6820a1b4bd34f0a6ce7" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.824325 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe5d371e99d2107ee0c244ad9a346c079ffc9e36e36fb6820a1b4bd34f0a6ce7"} err="failed to get container status \"fe5d371e99d2107ee0c244ad9a346c079ffc9e36e36fb6820a1b4bd34f0a6ce7\": rpc error: code = NotFound desc = could not find container \"fe5d371e99d2107ee0c244ad9a346c079ffc9e36e36fb6820a1b4bd34f0a6ce7\": container with ID starting with fe5d371e99d2107ee0c244ad9a346c079ffc9e36e36fb6820a1b4bd34f0a6ce7 not found: ID does not exist" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.824384 4706 scope.go:117] "RemoveContainer" 
containerID="9afa769327a265fa2e9d6d4d07cfa43c0d7e5871a28779f0839cae24c20a00ea" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.825432 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9afa769327a265fa2e9d6d4d07cfa43c0d7e5871a28779f0839cae24c20a00ea"} err="failed to get container status \"9afa769327a265fa2e9d6d4d07cfa43c0d7e5871a28779f0839cae24c20a00ea\": rpc error: code = NotFound desc = could not find container \"9afa769327a265fa2e9d6d4d07cfa43c0d7e5871a28779f0839cae24c20a00ea\": container with ID starting with 9afa769327a265fa2e9d6d4d07cfa43c0d7e5871a28779f0839cae24c20a00ea not found: ID does not exist" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.835228 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.858091 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Dec 06 05:47:27 crc kubenswrapper[4706]: E1206 05:47:27.858546 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9d9025f-cc6c-42ed-b686-ce0de76d83c7" containerName="cinder-api" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.858564 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9d9025f-cc6c-42ed-b686-ce0de76d83c7" containerName="cinder-api" Dec 06 05:47:27 crc kubenswrapper[4706]: E1206 05:47:27.858592 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9d9025f-cc6c-42ed-b686-ce0de76d83c7" containerName="cinder-api-log" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.858599 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9d9025f-cc6c-42ed-b686-ce0de76d83c7" containerName="cinder-api-log" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.858778 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9d9025f-cc6c-42ed-b686-ce0de76d83c7" containerName="cinder-api" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.858802 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9d9025f-cc6c-42ed-b686-ce0de76d83c7" containerName="cinder-api-log" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.859932 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.862681 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.862847 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.862990 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.864712 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.981673 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.997605 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3631398b-6bec-44d1-bf3b-19f8e8114c5c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.997653 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3631398b-6bec-44d1-bf3b-19f8e8114c5c-config-data\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.997676 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3631398b-6bec-44d1-bf3b-19f8e8114c5c-scripts\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.997719 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3631398b-6bec-44d1-bf3b-19f8e8114c5c-logs\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.997793 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3631398b-6bec-44d1-bf3b-19f8e8114c5c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.997996 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3631398b-6bec-44d1-bf3b-19f8e8114c5c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.998087 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3631398b-6bec-44d1-bf3b-19f8e8114c5c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.998153 4706 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfdph\" (UniqueName: \"kubernetes.io/projected/3631398b-6bec-44d1-bf3b-19f8e8114c5c-kube-api-access-sfdph\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:27 crc kubenswrapper[4706]: I1206 05:47:27.998201 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3631398b-6bec-44d1-bf3b-19f8e8114c5c-config-data-custom\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.049404 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9d9025f-cc6c-42ed-b686-ce0de76d83c7" path="/var/lib/kubelet/pods/a9d9025f-cc6c-42ed-b686-ce0de76d83c7/volumes" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.099762 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3631398b-6bec-44d1-bf3b-19f8e8114c5c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.099835 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3631398b-6bec-44d1-bf3b-19f8e8114c5c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.099869 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfdph\" (UniqueName: \"kubernetes.io/projected/3631398b-6bec-44d1-bf3b-19f8e8114c5c-kube-api-access-sfdph\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.099892 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3631398b-6bec-44d1-bf3b-19f8e8114c5c-config-data-custom\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.099929 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3631398b-6bec-44d1-bf3b-19f8e8114c5c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.099955 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3631398b-6bec-44d1-bf3b-19f8e8114c5c-config-data\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.099977 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3631398b-6bec-44d1-bf3b-19f8e8114c5c-scripts\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.100007 4706 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3631398b-6bec-44d1-bf3b-19f8e8114c5c-logs\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.100127 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3631398b-6bec-44d1-bf3b-19f8e8114c5c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.103422 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/3631398b-6bec-44d1-bf3b-19f8e8114c5c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.104680 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3631398b-6bec-44d1-bf3b-19f8e8114c5c-public-tls-certs\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.105246 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3631398b-6bec-44d1-bf3b-19f8e8114c5c-logs\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.106134 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3631398b-6bec-44d1-bf3b-19f8e8114c5c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.107355 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3631398b-6bec-44d1-bf3b-19f8e8114c5c-config-data-custom\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.107497 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3631398b-6bec-44d1-bf3b-19f8e8114c5c-scripts\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.111465 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3631398b-6bec-44d1-bf3b-19f8e8114c5c-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.115939 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3631398b-6bec-44d1-bf3b-19f8e8114c5c-config-data\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.121578 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfdph\" (UniqueName: 
\"kubernetes.io/projected/3631398b-6bec-44d1-bf3b-19f8e8114c5c-kube-api-access-sfdph\") pod \"cinder-api-0\" (UID: \"3631398b-6bec-44d1-bf3b-19f8e8114c5c\") " pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.187907 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.710123 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.776283 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"3631398b-6bec-44d1-bf3b-19f8e8114c5c","Type":"ContainerStarted","Data":"c221be90b367b196b8167ce6f8392b911b559383589b78534bf8530fb17b6755"} Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.986005 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-7f979b84f6-hzq85" podUID="ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.142:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.142:8443: connect: connection refused" Dec 06 05:47:28 crc kubenswrapper[4706]: I1206 05:47:28.986138 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:47:29 crc kubenswrapper[4706]: I1206 05:47:29.385277 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-pjt6m" Dec 06 05:47:29 crc kubenswrapper[4706]: I1206 05:47:29.531839 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bc067583-4394-4fa3-86fc-d6e626ec0f18-db-sync-config-data\") pod \"bc067583-4394-4fa3-86fc-d6e626ec0f18\" (UID: \"bc067583-4394-4fa3-86fc-d6e626ec0f18\") " Dec 06 05:47:29 crc kubenswrapper[4706]: I1206 05:47:29.532020 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc067583-4394-4fa3-86fc-d6e626ec0f18-combined-ca-bundle\") pod \"bc067583-4394-4fa3-86fc-d6e626ec0f18\" (UID: \"bc067583-4394-4fa3-86fc-d6e626ec0f18\") " Dec 06 05:47:29 crc kubenswrapper[4706]: I1206 05:47:29.532123 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc067583-4394-4fa3-86fc-d6e626ec0f18-config-data\") pod \"bc067583-4394-4fa3-86fc-d6e626ec0f18\" (UID: \"bc067583-4394-4fa3-86fc-d6e626ec0f18\") " Dec 06 05:47:29 crc kubenswrapper[4706]: I1206 05:47:29.532273 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s57cs\" (UniqueName: \"kubernetes.io/projected/bc067583-4394-4fa3-86fc-d6e626ec0f18-kube-api-access-s57cs\") pod \"bc067583-4394-4fa3-86fc-d6e626ec0f18\" (UID: \"bc067583-4394-4fa3-86fc-d6e626ec0f18\") " Dec 06 05:47:29 crc kubenswrapper[4706]: I1206 05:47:29.538065 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc067583-4394-4fa3-86fc-d6e626ec0f18-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "bc067583-4394-4fa3-86fc-d6e626ec0f18" (UID: "bc067583-4394-4fa3-86fc-d6e626ec0f18"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:29 crc kubenswrapper[4706]: I1206 05:47:29.539362 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc067583-4394-4fa3-86fc-d6e626ec0f18-kube-api-access-s57cs" (OuterVolumeSpecName: "kube-api-access-s57cs") pod "bc067583-4394-4fa3-86fc-d6e626ec0f18" (UID: "bc067583-4394-4fa3-86fc-d6e626ec0f18"). InnerVolumeSpecName "kube-api-access-s57cs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:29 crc kubenswrapper[4706]: I1206 05:47:29.586711 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc067583-4394-4fa3-86fc-d6e626ec0f18-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bc067583-4394-4fa3-86fc-d6e626ec0f18" (UID: "bc067583-4394-4fa3-86fc-d6e626ec0f18"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:29 crc kubenswrapper[4706]: I1206 05:47:29.622025 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc067583-4394-4fa3-86fc-d6e626ec0f18-config-data" (OuterVolumeSpecName: "config-data") pod "bc067583-4394-4fa3-86fc-d6e626ec0f18" (UID: "bc067583-4394-4fa3-86fc-d6e626ec0f18"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:29 crc kubenswrapper[4706]: I1206 05:47:29.636238 4706 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/bc067583-4394-4fa3-86fc-d6e626ec0f18-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:29 crc kubenswrapper[4706]: I1206 05:47:29.636419 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bc067583-4394-4fa3-86fc-d6e626ec0f18-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:29 crc kubenswrapper[4706]: I1206 05:47:29.636538 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bc067583-4394-4fa3-86fc-d6e626ec0f18-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:29 crc kubenswrapper[4706]: I1206 05:47:29.636582 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s57cs\" (UniqueName: \"kubernetes.io/projected/bc067583-4394-4fa3-86fc-d6e626ec0f18-kube-api-access-s57cs\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:29 crc kubenswrapper[4706]: I1206 05:47:29.804484 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-pjt6m" event={"ID":"bc067583-4394-4fa3-86fc-d6e626ec0f18","Type":"ContainerDied","Data":"f58590dfbd3033126090481ed225e6ccd32bf33f46517199611ef7161c3aa425"} Dec 06 05:47:29 crc kubenswrapper[4706]: I1206 05:47:29.804528 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-pjt6m" Dec 06 05:47:29 crc kubenswrapper[4706]: I1206 05:47:29.804539 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f58590dfbd3033126090481ed225e6ccd32bf33f46517199611ef7161c3aa425" Dec 06 05:47:29 crc kubenswrapper[4706]: I1206 05:47:29.811398 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"3631398b-6bec-44d1-bf3b-19f8e8114c5c","Type":"ContainerStarted","Data":"64d8eaa3d268f8c102367288fb88ac9f8958bead7b050932c44e9a39f949967b"} Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.138377 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95d56546f-jj74z"] Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.138913 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-95d56546f-jj74z" podUID="e10b1414-cac8-4d46-a7e8-dfa6c978d13c" containerName="dnsmasq-dns" containerID="cri-o://7f03325c628ee98cccae8880fff9aa3524dbf4c1978edae9de916a75ce9bab36" gracePeriod=10 Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.143336 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.182678 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-z825x"] Dec 06 05:47:30 crc kubenswrapper[4706]: E1206 05:47:30.200846 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bc067583-4394-4fa3-86fc-d6e626ec0f18" containerName="glance-db-sync" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.200869 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="bc067583-4394-4fa3-86fc-d6e626ec0f18" containerName="glance-db-sync" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.201054 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="bc067583-4394-4fa3-86fc-d6e626ec0f18" containerName="glance-db-sync" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.203933 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-z825x"] Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.204367 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.359456 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-config\") pod \"dnsmasq-dns-5784cf869f-z825x\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.359506 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-dns-svc\") pod \"dnsmasq-dns-5784cf869f-z825x\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.359530 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnbj6\" (UniqueName: \"kubernetes.io/projected/709e6d78-3db4-4779-a1ad-4c7eda89838d-kube-api-access-vnbj6\") pod \"dnsmasq-dns-5784cf869f-z825x\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.359605 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-z825x\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.359685 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-z825x\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.359711 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-z825x\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.461632 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-dns-svc\") pod \"dnsmasq-dns-5784cf869f-z825x\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.461716 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnbj6\" (UniqueName: \"kubernetes.io/projected/709e6d78-3db4-4779-a1ad-4c7eda89838d-kube-api-access-vnbj6\") pod \"dnsmasq-dns-5784cf869f-z825x\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.461786 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-ovsdbserver-nb\") pod 
\"dnsmasq-dns-5784cf869f-z825x\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.461902 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-z825x\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.461934 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-z825x\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.461972 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-config\") pod \"dnsmasq-dns-5784cf869f-z825x\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.463120 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-config\") pod \"dnsmasq-dns-5784cf869f-z825x\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.463279 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-z825x\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.463816 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-z825x\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.464772 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-z825x\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.466365 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-dns-svc\") pod \"dnsmasq-dns-5784cf869f-z825x\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.485956 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnbj6\" (UniqueName: \"kubernetes.io/projected/709e6d78-3db4-4779-a1ad-4c7eda89838d-kube-api-access-vnbj6\") pod \"dnsmasq-dns-5784cf869f-z825x\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " 
pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.537636 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.733414 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.831871 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"3631398b-6bec-44d1-bf3b-19f8e8114c5c","Type":"ContainerStarted","Data":"47b4cb0978bbafb8d63e29d4d8182ae1035d90eaf198db19530e609eda4753c4"} Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.832319 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.834840 4706 generic.go:334] "Generic (PLEG): container finished" podID="e10b1414-cac8-4d46-a7e8-dfa6c978d13c" containerID="7f03325c628ee98cccae8880fff9aa3524dbf4c1978edae9de916a75ce9bab36" exitCode=0 Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.834892 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95d56546f-jj74z" event={"ID":"e10b1414-cac8-4d46-a7e8-dfa6c978d13c","Type":"ContainerDied","Data":"7f03325c628ee98cccae8880fff9aa3524dbf4c1978edae9de916a75ce9bab36"} Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.837133 4706 generic.go:334] "Generic (PLEG): container finished" podID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerID="7481f637da8457d5258f019034cbd64b4fff7d392b0107982e7fb453ae34bc1a" exitCode=0 Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.837155 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2df640a0-8f4d-4743-84f9-32a9e187d282","Type":"ContainerDied","Data":"7481f637da8457d5258f019034cbd64b4fff7d392b0107982e7fb453ae34bc1a"} Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.837170 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2df640a0-8f4d-4743-84f9-32a9e187d282","Type":"ContainerDied","Data":"9d14beec0e18dccba74258df2468fd74a75cdb577714e91db87790024993c8b1"} Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.837193 4706 scope.go:117] "RemoveContainer" containerID="9133a43b784380a19e539aa68558dcfcb732f30a1f8ef969e7aba3929539c185" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.837294 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.854785 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.854767869 podStartE2EDuration="3.854767869s" podCreationTimestamp="2025-12-06 05:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:30.850799451 +0000 UTC m=+1673.178623395" watchObservedRunningTime="2025-12-06 05:47:30.854767869 +0000 UTC m=+1673.182591813" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.872586 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2df640a0-8f4d-4743-84f9-32a9e187d282-log-httpd\") pod \"2df640a0-8f4d-4743-84f9-32a9e187d282\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.872763 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ss69s\" (UniqueName: \"kubernetes.io/projected/2df640a0-8f4d-4743-84f9-32a9e187d282-kube-api-access-ss69s\") pod \"2df640a0-8f4d-4743-84f9-32a9e187d282\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.872918 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-combined-ca-bundle\") pod \"2df640a0-8f4d-4743-84f9-32a9e187d282\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.873098 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-config-data\") pod \"2df640a0-8f4d-4743-84f9-32a9e187d282\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.873315 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-sg-core-conf-yaml\") pod \"2df640a0-8f4d-4743-84f9-32a9e187d282\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.873515 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2df640a0-8f4d-4743-84f9-32a9e187d282-run-httpd\") pod \"2df640a0-8f4d-4743-84f9-32a9e187d282\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.873651 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-scripts\") pod \"2df640a0-8f4d-4743-84f9-32a9e187d282\" (UID: \"2df640a0-8f4d-4743-84f9-32a9e187d282\") " Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.876222 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2df640a0-8f4d-4743-84f9-32a9e187d282-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "2df640a0-8f4d-4743-84f9-32a9e187d282" (UID: "2df640a0-8f4d-4743-84f9-32a9e187d282"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.876681 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2df640a0-8f4d-4743-84f9-32a9e187d282-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "2df640a0-8f4d-4743-84f9-32a9e187d282" (UID: "2df640a0-8f4d-4743-84f9-32a9e187d282"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.880525 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-scripts" (OuterVolumeSpecName: "scripts") pod "2df640a0-8f4d-4743-84f9-32a9e187d282" (UID: "2df640a0-8f4d-4743-84f9-32a9e187d282"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.886236 4706 scope.go:117] "RemoveContainer" containerID="27a55dcd05469839e11680828d8c1a8f324b117e82c0fcd8e2ed43a4ad7bb8c7" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.886567 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2df640a0-8f4d-4743-84f9-32a9e187d282-kube-api-access-ss69s" (OuterVolumeSpecName: "kube-api-access-ss69s") pod "2df640a0-8f4d-4743-84f9-32a9e187d282" (UID: "2df640a0-8f4d-4743-84f9-32a9e187d282"). InnerVolumeSpecName "kube-api-access-ss69s". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.952211 4706 scope.go:117] "RemoveContainer" containerID="7481f637da8457d5258f019034cbd64b4fff7d392b0107982e7fb453ae34bc1a" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.956650 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "2df640a0-8f4d-4743-84f9-32a9e187d282" (UID: "2df640a0-8f4d-4743-84f9-32a9e187d282"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.975805 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.975834 4706 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2df640a0-8f4d-4743-84f9-32a9e187d282-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.975843 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ss69s\" (UniqueName: \"kubernetes.io/projected/2df640a0-8f4d-4743-84f9-32a9e187d282-kube-api-access-ss69s\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.975855 4706 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.975863 4706 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2df640a0-8f4d-4743-84f9-32a9e187d282-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.981606 4706 scope.go:117] "RemoveContainer" containerID="b6c6b612946de87b2e9f02d1187cf03be5000e9780f622d1f881e47c6a22035f" Dec 06 05:47:30 crc kubenswrapper[4706]: I1206 05:47:30.997253 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2df640a0-8f4d-4743-84f9-32a9e187d282" (UID: "2df640a0-8f4d-4743-84f9-32a9e187d282"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.008748 4706 scope.go:117] "RemoveContainer" containerID="9133a43b784380a19e539aa68558dcfcb732f30a1f8ef969e7aba3929539c185" Dec 06 05:47:31 crc kubenswrapper[4706]: E1206 05:47:31.009864 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9133a43b784380a19e539aa68558dcfcb732f30a1f8ef969e7aba3929539c185\": container with ID starting with 9133a43b784380a19e539aa68558dcfcb732f30a1f8ef969e7aba3929539c185 not found: ID does not exist" containerID="9133a43b784380a19e539aa68558dcfcb732f30a1f8ef969e7aba3929539c185" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.009903 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9133a43b784380a19e539aa68558dcfcb732f30a1f8ef969e7aba3929539c185"} err="failed to get container status \"9133a43b784380a19e539aa68558dcfcb732f30a1f8ef969e7aba3929539c185\": rpc error: code = NotFound desc = could not find container \"9133a43b784380a19e539aa68558dcfcb732f30a1f8ef969e7aba3929539c185\": container with ID starting with 9133a43b784380a19e539aa68558dcfcb732f30a1f8ef969e7aba3929539c185 not found: ID does not exist" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.009924 4706 scope.go:117] "RemoveContainer" containerID="27a55dcd05469839e11680828d8c1a8f324b117e82c0fcd8e2ed43a4ad7bb8c7" Dec 06 05:47:31 crc kubenswrapper[4706]: E1206 05:47:31.011407 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27a55dcd05469839e11680828d8c1a8f324b117e82c0fcd8e2ed43a4ad7bb8c7\": container with ID starting with 27a55dcd05469839e11680828d8c1a8f324b117e82c0fcd8e2ed43a4ad7bb8c7 not found: ID does not exist" containerID="27a55dcd05469839e11680828d8c1a8f324b117e82c0fcd8e2ed43a4ad7bb8c7" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.011443 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27a55dcd05469839e11680828d8c1a8f324b117e82c0fcd8e2ed43a4ad7bb8c7"} err="failed to get container status \"27a55dcd05469839e11680828d8c1a8f324b117e82c0fcd8e2ed43a4ad7bb8c7\": rpc error: code = NotFound desc = could not find container \"27a55dcd05469839e11680828d8c1a8f324b117e82c0fcd8e2ed43a4ad7bb8c7\": container with ID starting with 27a55dcd05469839e11680828d8c1a8f324b117e82c0fcd8e2ed43a4ad7bb8c7 not found: ID does not exist" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.011464 4706 scope.go:117] "RemoveContainer" containerID="7481f637da8457d5258f019034cbd64b4fff7d392b0107982e7fb453ae34bc1a" Dec 06 05:47:31 crc kubenswrapper[4706]: E1206 05:47:31.011719 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7481f637da8457d5258f019034cbd64b4fff7d392b0107982e7fb453ae34bc1a\": container with ID starting with 7481f637da8457d5258f019034cbd64b4fff7d392b0107982e7fb453ae34bc1a not found: ID does not exist" containerID="7481f637da8457d5258f019034cbd64b4fff7d392b0107982e7fb453ae34bc1a" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.011741 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7481f637da8457d5258f019034cbd64b4fff7d392b0107982e7fb453ae34bc1a"} err="failed to get container status \"7481f637da8457d5258f019034cbd64b4fff7d392b0107982e7fb453ae34bc1a\": rpc error: code = NotFound desc = could not 
find container \"7481f637da8457d5258f019034cbd64b4fff7d392b0107982e7fb453ae34bc1a\": container with ID starting with 7481f637da8457d5258f019034cbd64b4fff7d392b0107982e7fb453ae34bc1a not found: ID does not exist" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.011756 4706 scope.go:117] "RemoveContainer" containerID="b6c6b612946de87b2e9f02d1187cf03be5000e9780f622d1f881e47c6a22035f" Dec 06 05:47:31 crc kubenswrapper[4706]: E1206 05:47:31.011962 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b6c6b612946de87b2e9f02d1187cf03be5000e9780f622d1f881e47c6a22035f\": container with ID starting with b6c6b612946de87b2e9f02d1187cf03be5000e9780f622d1f881e47c6a22035f not found: ID does not exist" containerID="b6c6b612946de87b2e9f02d1187cf03be5000e9780f622d1f881e47c6a22035f" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.011982 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b6c6b612946de87b2e9f02d1187cf03be5000e9780f622d1f881e47c6a22035f"} err="failed to get container status \"b6c6b612946de87b2e9f02d1187cf03be5000e9780f622d1f881e47c6a22035f\": rpc error: code = NotFound desc = could not find container \"b6c6b612946de87b2e9f02d1187cf03be5000e9780f622d1f881e47c6a22035f\": container with ID starting with b6c6b612946de87b2e9f02d1187cf03be5000e9780f622d1f881e47c6a22035f not found: ID does not exist" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.021469 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-config-data" (OuterVolumeSpecName: "config-data") pod "2df640a0-8f4d-4743-84f9-32a9e187d282" (UID: "2df640a0-8f4d-4743-84f9-32a9e187d282"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.044248 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-z825x"] Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.083625 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.083665 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2df640a0-8f4d-4743-84f9-32a9e187d282-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.148994 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Dec 06 05:47:31 crc kubenswrapper[4706]: E1206 05:47:31.149381 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerName="sg-core" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.149395 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerName="sg-core" Dec 06 05:47:31 crc kubenswrapper[4706]: E1206 05:47:31.149407 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerName="ceilometer-notification-agent" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.149413 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerName="ceilometer-notification-agent" Dec 06 05:47:31 crc kubenswrapper[4706]: E1206 05:47:31.149428 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerName="proxy-httpd" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.149433 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerName="proxy-httpd" Dec 06 05:47:31 crc kubenswrapper[4706]: E1206 05:47:31.149448 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerName="ceilometer-central-agent" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.149457 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerName="ceilometer-central-agent" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.149629 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerName="ceilometer-central-agent" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.149644 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerName="ceilometer-notification-agent" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.149655 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerName="sg-core" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.149670 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="2df640a0-8f4d-4743-84f9-32a9e187d282" containerName="proxy-httpd" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.150563 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.158025 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.158222 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-mw9mc" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.158837 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.166506 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.237582 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.260560 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.273210 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.276867 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.279631 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.280501 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.289099 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.289320 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gt52z\" (UniqueName: \"kubernetes.io/projected/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-kube-api-access-gt52z\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.289483 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-config-data\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.289523 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.289552 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.289769 4706 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-logs\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.289976 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-scripts\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.290116 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.290588 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.295408 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.301538 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.314129 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.391908 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe58f8d9-1234-4aab-9eec-6e2bad482002-logs\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.392269 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.392386 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-logs\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.392499 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/312c3de0-e931-4bf0-ae83-588410c22061-run-httpd\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.392635 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/fe58f8d9-1234-4aab-9eec-6e2bad482002-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.392764 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-scripts\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.392865 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.392959 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/312c3de0-e931-4bf0-ae83-588410c22061-log-httpd\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.393104 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.393205 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe58f8d9-1234-4aab-9eec-6e2bad482002-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.393309 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.393404 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gt52z\" (UniqueName: \"kubernetes.io/projected/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-kube-api-access-gt52z\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.393497 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lvh7s\" (UniqueName: \"kubernetes.io/projected/fe58f8d9-1234-4aab-9eec-6e2bad482002-kube-api-access-lvh7s\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.393589 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-scripts\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.393676 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe58f8d9-1234-4aab-9eec-6e2bad482002-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.393788 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-config-data\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.393538 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.392821 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-logs\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.393977 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.394090 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.394183 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-config-data\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.394277 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe58f8d9-1234-4aab-9eec-6e2bad482002-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.394380 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krpdt\" (UniqueName: \"kubernetes.io/projected/312c3de0-e931-4bf0-ae83-588410c22061-kube-api-access-krpdt\") pod \"ceilometer-0\" (UID: 
\"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.398086 4706 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.398709 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-config-data\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.398920 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.411303 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-scripts\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.412254 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gt52z\" (UniqueName: \"kubernetes.io/projected/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-kube-api-access-gt52z\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.442436 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.479252 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.496292 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.496347 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/312c3de0-e931-4bf0-ae83-588410c22061-log-httpd\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.496401 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe58f8d9-1234-4aab-9eec-6e2bad482002-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.496437 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.496469 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lvh7s\" (UniqueName: \"kubernetes.io/projected/fe58f8d9-1234-4aab-9eec-6e2bad482002-kube-api-access-lvh7s\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.496532 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-scripts\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.496564 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe58f8d9-1234-4aab-9eec-6e2bad482002-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.496611 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-config-data\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.496643 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe58f8d9-1234-4aab-9eec-6e2bad482002-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.496681 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-krpdt\" (UniqueName: \"kubernetes.io/projected/312c3de0-e931-4bf0-ae83-588410c22061-kube-api-access-krpdt\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.496721 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe58f8d9-1234-4aab-9eec-6e2bad482002-logs\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.496756 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.496794 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/312c3de0-e931-4bf0-ae83-588410c22061-run-httpd\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.496827 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fe58f8d9-1234-4aab-9eec-6e2bad482002-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.497384 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fe58f8d9-1234-4aab-9eec-6e2bad482002-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.497680 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe58f8d9-1234-4aab-9eec-6e2bad482002-logs\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.500020 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.500572 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-scripts\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.500686 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.500887 4706 
operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.508385 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/312c3de0-e931-4bf0-ae83-588410c22061-run-httpd\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.508466 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/312c3de0-e931-4bf0-ae83-588410c22061-log-httpd\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.510082 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe58f8d9-1234-4aab-9eec-6e2bad482002-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.514679 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-config-data\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.528549 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe58f8d9-1234-4aab-9eec-6e2bad482002-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.532634 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krpdt\" (UniqueName: \"kubernetes.io/projected/312c3de0-e931-4bf0-ae83-588410c22061-kube-api-access-krpdt\") pod \"ceilometer-0\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.538086 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lvh7s\" (UniqueName: \"kubernetes.io/projected/fe58f8d9-1234-4aab-9eec-6e2bad482002-kube-api-access-lvh7s\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.551106 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe58f8d9-1234-4aab-9eec-6e2bad482002-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.566107 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") 
" pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.655635 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.656624 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.736181 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.859261 4706 generic.go:334] "Generic (PLEG): container finished" podID="709e6d78-3db4-4779-a1ad-4c7eda89838d" containerID="551ad65c832494f001ee7773a3ef5d48f896bd4ff3cbefbffc0c6f067489305c" exitCode=0 Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.859331 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-z825x" event={"ID":"709e6d78-3db4-4779-a1ad-4c7eda89838d","Type":"ContainerDied","Data":"551ad65c832494f001ee7773a3ef5d48f896bd4ff3cbefbffc0c6f067489305c"} Dec 06 05:47:31 crc kubenswrapper[4706]: I1206 05:47:31.859358 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-z825x" event={"ID":"709e6d78-3db4-4779-a1ad-4c7eda89838d","Type":"ContainerStarted","Data":"58d90addc5c9c3622152629d6a3912fa68a47e7ab91f27254c561fcd305e64b9"} Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.065811 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2df640a0-8f4d-4743-84f9-32a9e187d282" path="/var/lib/kubelet/pods/2df640a0-8f4d-4743-84f9-32a9e187d282/volumes" Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.143461 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.215891 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lvnfx\" (UniqueName: \"kubernetes.io/projected/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-kube-api-access-lvnfx\") pod \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.216037 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-dns-svc\") pod \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.216127 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-dns-swift-storage-0\") pod \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.216186 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-ovsdbserver-nb\") pod \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.216691 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-ovsdbserver-sb\") pod \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.217177 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-config\") pod \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\" (UID: \"e10b1414-cac8-4d46-a7e8-dfa6c978d13c\") " Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.221833 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-kube-api-access-lvnfx" (OuterVolumeSpecName: "kube-api-access-lvnfx") pod "e10b1414-cac8-4d46-a7e8-dfa6c978d13c" (UID: "e10b1414-cac8-4d46-a7e8-dfa6c978d13c"). InnerVolumeSpecName "kube-api-access-lvnfx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.283680 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e10b1414-cac8-4d46-a7e8-dfa6c978d13c" (UID: "e10b1414-cac8-4d46-a7e8-dfa6c978d13c"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.291111 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-config" (OuterVolumeSpecName: "config") pod "e10b1414-cac8-4d46-a7e8-dfa6c978d13c" (UID: "e10b1414-cac8-4d46-a7e8-dfa6c978d13c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.297668 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e10b1414-cac8-4d46-a7e8-dfa6c978d13c" (UID: "e10b1414-cac8-4d46-a7e8-dfa6c978d13c"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.297997 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e10b1414-cac8-4d46-a7e8-dfa6c978d13c" (UID: "e10b1414-cac8-4d46-a7e8-dfa6c978d13c"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.316938 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e10b1414-cac8-4d46-a7e8-dfa6c978d13c" (UID: "e10b1414-cac8-4d46-a7e8-dfa6c978d13c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.320900 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.320934 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lvnfx\" (UniqueName: \"kubernetes.io/projected/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-kube-api-access-lvnfx\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.320946 4706 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.320954 4706 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.320964 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.320972 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e10b1414-cac8-4d46-a7e8-dfa6c978d13c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.354665 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.465565 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.563661 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 06 05:47:32 crc kubenswrapper[4706]: W1206 05:47:32.573753 4706 
manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfe58f8d9_1234_4aab_9eec_6e2bad482002.slice/crio-4cc059581b4b52906d36236556ab96beebc94db2957d35847768a60243a8cd7b WatchSource:0}: Error finding container 4cc059581b4b52906d36236556ab96beebc94db2957d35847768a60243a8cd7b: Status 404 returned error can't find the container with id 4cc059581b4b52906d36236556ab96beebc94db2957d35847768a60243a8cd7b Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.890440 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95d56546f-jj74z" event={"ID":"e10b1414-cac8-4d46-a7e8-dfa6c978d13c","Type":"ContainerDied","Data":"4105b2e195a2567e6db64bce4cb0bb0032888e9830aab8e0fe68e5d5c77c0429"} Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.890460 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95d56546f-jj74z" Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.890773 4706 scope.go:117] "RemoveContainer" containerID="7f03325c628ee98cccae8880fff9aa3524dbf4c1978edae9de916a75ce9bab36" Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.894255 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"312c3de0-e931-4bf0-ae83-588410c22061","Type":"ContainerStarted","Data":"d5756a09975524bdcb1140ffda48c7b66e0452c3d4f203e938f86278270ecaca"} Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.897595 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-z825x" event={"ID":"709e6d78-3db4-4779-a1ad-4c7eda89838d","Type":"ContainerStarted","Data":"19d4f5cba503e86f785f6ea782d6767afea8fa684b7ed675ae51ebd06ff23612"} Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.897720 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.899978 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fe58f8d9-1234-4aab-9eec-6e2bad482002","Type":"ContainerStarted","Data":"4cc059581b4b52906d36236556ab96beebc94db2957d35847768a60243a8cd7b"} Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.901933 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d","Type":"ContainerStarted","Data":"82a1bd9d74e600e9ae6443fe92519d1272a966d1f1aec3509aa608efdf0a22a4"} Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.919774 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5784cf869f-z825x" podStartSLOduration=2.919753072 podStartE2EDuration="2.919753072s" podCreationTimestamp="2025-12-06 05:47:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:32.913524004 +0000 UTC m=+1675.241347958" watchObservedRunningTime="2025-12-06 05:47:32.919753072 +0000 UTC m=+1675.247577036" Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.940097 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95d56546f-jj74z"] Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.941224 4706 scope.go:117] "RemoveContainer" containerID="389b77c493421fddd9ce10f09ad4240c963133ae19f05eb0ec099dc78e449b0e" Dec 06 05:47:32 crc kubenswrapper[4706]: I1206 05:47:32.949484 4706 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-95d56546f-jj74z"] Dec 06 05:47:33 crc kubenswrapper[4706]: I1206 05:47:33.298482 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Dec 06 05:47:33 crc kubenswrapper[4706]: I1206 05:47:33.346222 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 06 05:47:33 crc kubenswrapper[4706]: I1206 05:47:33.375677 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 06 05:47:33 crc kubenswrapper[4706]: I1206 05:47:33.441226 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 06 05:47:33 crc kubenswrapper[4706]: I1206 05:47:33.916700 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fe58f8d9-1234-4aab-9eec-6e2bad482002","Type":"ContainerStarted","Data":"f3523dfa7e90a128a77e492e27b44740a88cec7e2e55cbd77e99fddb20e37054"} Dec 06 05:47:33 crc kubenswrapper[4706]: I1206 05:47:33.917929 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d","Type":"ContainerStarted","Data":"f280a5422ec69deb2c0e77890300be8e9237d9012597ec59ec79111e30969208"} Dec 06 05:47:33 crc kubenswrapper[4706]: I1206 05:47:33.918857 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="1a29d8d3-8bd3-493f-b40c-89104b4d3a02" containerName="probe" containerID="cri-o://c0613fb5cf6d378604fa3a0322447e04b36d8f54a054a76970c08d0d62e9b661" gracePeriod=30 Dec 06 05:47:33 crc kubenswrapper[4706]: I1206 05:47:33.918643 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="1a29d8d3-8bd3-493f-b40c-89104b4d3a02" containerName="cinder-scheduler" containerID="cri-o://bbcda477d3fd9a6ff83b8f8fdf3485c2cf6162604ff6d9989953a968d572e160" gracePeriod=30 Dec 06 05:47:34 crc kubenswrapper[4706]: I1206 05:47:34.056750 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e10b1414-cac8-4d46-a7e8-dfa6c978d13c" path="/var/lib/kubelet/pods/e10b1414-cac8-4d46-a7e8-dfa6c978d13c/volumes" Dec 06 05:47:34 crc kubenswrapper[4706]: I1206 05:47:34.928975 4706 generic.go:334] "Generic (PLEG): container finished" podID="1a29d8d3-8bd3-493f-b40c-89104b4d3a02" containerID="c0613fb5cf6d378604fa3a0322447e04b36d8f54a054a76970c08d0d62e9b661" exitCode=0 Dec 06 05:47:34 crc kubenswrapper[4706]: I1206 05:47:34.929293 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1a29d8d3-8bd3-493f-b40c-89104b4d3a02","Type":"ContainerDied","Data":"c0613fb5cf6d378604fa3a0322447e04b36d8f54a054a76970c08d0d62e9b661"} Dec 06 05:47:34 crc kubenswrapper[4706]: I1206 05:47:34.932621 4706 generic.go:334] "Generic (PLEG): container finished" podID="ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" containerID="5a6bfebf319d4eb3f9bbfd4b2c9f92dbf46c782a519f3895cca98b4760ce3f3b" exitCode=137 Dec 06 05:47:34 crc kubenswrapper[4706]: I1206 05:47:34.932686 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f979b84f6-hzq85" event={"ID":"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf","Type":"ContainerDied","Data":"5a6bfebf319d4eb3f9bbfd4b2c9f92dbf46c782a519f3895cca98b4760ce3f3b"} Dec 06 05:47:34 crc kubenswrapper[4706]: I1206 05:47:34.934599 4706 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fe58f8d9-1234-4aab-9eec-6e2bad482002","Type":"ContainerStarted","Data":"408b2e7ba439a70138bf8da900757aff07d11b2f851d0db013299a0b38895e7f"} Dec 06 05:47:34 crc kubenswrapper[4706]: I1206 05:47:34.934743 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="fe58f8d9-1234-4aab-9eec-6e2bad482002" containerName="glance-log" containerID="cri-o://f3523dfa7e90a128a77e492e27b44740a88cec7e2e55cbd77e99fddb20e37054" gracePeriod=30 Dec 06 05:47:34 crc kubenswrapper[4706]: I1206 05:47:34.935944 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="fe58f8d9-1234-4aab-9eec-6e2bad482002" containerName="glance-httpd" containerID="cri-o://408b2e7ba439a70138bf8da900757aff07d11b2f851d0db013299a0b38895e7f" gracePeriod=30 Dec 06 05:47:34 crc kubenswrapper[4706]: I1206 05:47:34.943207 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d","Type":"ContainerStarted","Data":"fef4867fdb51598c34511e38a1fea2ee0b5cdfa65f66aae9de3bec3c7a27a6de"} Dec 06 05:47:34 crc kubenswrapper[4706]: I1206 05:47:34.943380 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="253e98d0-d2d8-42e4-a0ff-b4e6bb53036d" containerName="glance-log" containerID="cri-o://f280a5422ec69deb2c0e77890300be8e9237d9012597ec59ec79111e30969208" gracePeriod=30 Dec 06 05:47:34 crc kubenswrapper[4706]: I1206 05:47:34.943418 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="253e98d0-d2d8-42e4-a0ff-b4e6bb53036d" containerName="glance-httpd" containerID="cri-o://fef4867fdb51598c34511e38a1fea2ee0b5cdfa65f66aae9de3bec3c7a27a6de" gracePeriod=30 Dec 06 05:47:34 crc kubenswrapper[4706]: I1206 05:47:34.972695 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.972680369 podStartE2EDuration="4.972680369s" podCreationTimestamp="2025-12-06 05:47:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:34.968963848 +0000 UTC m=+1677.296787792" watchObservedRunningTime="2025-12-06 05:47:34.972680369 +0000 UTC m=+1677.300504303" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.004163 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.004142749 podStartE2EDuration="5.004142749s" podCreationTimestamp="2025-12-06 05:47:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:34.987588162 +0000 UTC m=+1677.315412106" watchObservedRunningTime="2025-12-06 05:47:35.004142749 +0000 UTC m=+1677.331966693" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.266179 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-46nqn"] Dec 06 05:47:35 crc kubenswrapper[4706]: E1206 05:47:35.266802 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e10b1414-cac8-4d46-a7e8-dfa6c978d13c" containerName="dnsmasq-dns" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.266866 4706 
state_mem.go:107] "Deleted CPUSet assignment" podUID="e10b1414-cac8-4d46-a7e8-dfa6c978d13c" containerName="dnsmasq-dns" Dec 06 05:47:35 crc kubenswrapper[4706]: E1206 05:47:35.266943 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e10b1414-cac8-4d46-a7e8-dfa6c978d13c" containerName="init" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.266991 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="e10b1414-cac8-4d46-a7e8-dfa6c978d13c" containerName="init" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.267229 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="e10b1414-cac8-4d46-a7e8-dfa6c978d13c" containerName="dnsmasq-dns" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.267964 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-46nqn" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.292744 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwjw4\" (UniqueName: \"kubernetes.io/projected/f16bb998-03b9-4bd9-93d4-9965fd119d32-kube-api-access-kwjw4\") pod \"nova-api-db-create-46nqn\" (UID: \"f16bb998-03b9-4bd9-93d4-9965fd119d32\") " pod="openstack/nova-api-db-create-46nqn" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.292832 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f16bb998-03b9-4bd9-93d4-9965fd119d32-operator-scripts\") pod \"nova-api-db-create-46nqn\" (UID: \"f16bb998-03b9-4bd9-93d4-9965fd119d32\") " pod="openstack/nova-api-db-create-46nqn" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.314061 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-46nqn"] Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.381561 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-mpcv6"] Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.383085 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-mpcv6" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.394197 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwjw4\" (UniqueName: \"kubernetes.io/projected/f16bb998-03b9-4bd9-93d4-9965fd119d32-kube-api-access-kwjw4\") pod \"nova-api-db-create-46nqn\" (UID: \"f16bb998-03b9-4bd9-93d4-9965fd119d32\") " pod="openstack/nova-api-db-create-46nqn" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.394304 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f16bb998-03b9-4bd9-93d4-9965fd119d32-operator-scripts\") pod \"nova-api-db-create-46nqn\" (UID: \"f16bb998-03b9-4bd9-93d4-9965fd119d32\") " pod="openstack/nova-api-db-create-46nqn" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.395145 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f16bb998-03b9-4bd9-93d4-9965fd119d32-operator-scripts\") pod \"nova-api-db-create-46nqn\" (UID: \"f16bb998-03b9-4bd9-93d4-9965fd119d32\") " pod="openstack/nova-api-db-create-46nqn" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.421960 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwjw4\" (UniqueName: \"kubernetes.io/projected/f16bb998-03b9-4bd9-93d4-9965fd119d32-kube-api-access-kwjw4\") pod \"nova-api-db-create-46nqn\" (UID: \"f16bb998-03b9-4bd9-93d4-9965fd119d32\") " pod="openstack/nova-api-db-create-46nqn" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.439156 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.440162 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-mpcv6"] Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.444628 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7f666db4c-wsc2b" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.502086 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-b649-account-create-update-htrrc"] Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.503413 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fdbb7\" (UniqueName: \"kubernetes.io/projected/f7867b3a-9ee2-4c8e-a401-7181a5c4a9da-kube-api-access-fdbb7\") pod \"nova-cell0-db-create-mpcv6\" (UID: \"f7867b3a-9ee2-4c8e-a401-7181a5c4a9da\") " pod="openstack/nova-cell0-db-create-mpcv6" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.503531 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7867b3a-9ee2-4c8e-a401-7181a5c4a9da-operator-scripts\") pod \"nova-cell0-db-create-mpcv6\" (UID: \"f7867b3a-9ee2-4c8e-a401-7181a5c4a9da\") " pod="openstack/nova-cell0-db-create-mpcv6" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.504036 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-b649-account-create-update-htrrc" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.508158 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.530119 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-b649-account-create-update-htrrc"] Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.574588 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-8xcns"] Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.575769 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-8xcns" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.593300 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-8xcns"] Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.605249 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fdbb7\" (UniqueName: \"kubernetes.io/projected/f7867b3a-9ee2-4c8e-a401-7181a5c4a9da-kube-api-access-fdbb7\") pod \"nova-cell0-db-create-mpcv6\" (UID: \"f7867b3a-9ee2-4c8e-a401-7181a5c4a9da\") " pod="openstack/nova-cell0-db-create-mpcv6" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.605327 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvc9s\" (UniqueName: \"kubernetes.io/projected/956e4870-4475-4d8e-a0c2-0ffefcfcbb1f-kube-api-access-rvc9s\") pod \"nova-api-b649-account-create-update-htrrc\" (UID: \"956e4870-4475-4d8e-a0c2-0ffefcfcbb1f\") " pod="openstack/nova-api-b649-account-create-update-htrrc" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.605369 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7867b3a-9ee2-4c8e-a401-7181a5c4a9da-operator-scripts\") pod \"nova-cell0-db-create-mpcv6\" (UID: \"f7867b3a-9ee2-4c8e-a401-7181a5c4a9da\") " pod="openstack/nova-cell0-db-create-mpcv6" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.605426 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-px662\" (UniqueName: \"kubernetes.io/projected/38fec6d0-a4dc-45b4-a7fb-7a185ce174e4-kube-api-access-px662\") pod \"nova-cell1-db-create-8xcns\" (UID: \"38fec6d0-a4dc-45b4-a7fb-7a185ce174e4\") " pod="openstack/nova-cell1-db-create-8xcns" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.605485 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/956e4870-4475-4d8e-a0c2-0ffefcfcbb1f-operator-scripts\") pod \"nova-api-b649-account-create-update-htrrc\" (UID: \"956e4870-4475-4d8e-a0c2-0ffefcfcbb1f\") " pod="openstack/nova-api-b649-account-create-update-htrrc" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.605550 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38fec6d0-a4dc-45b4-a7fb-7a185ce174e4-operator-scripts\") pod \"nova-cell1-db-create-8xcns\" (UID: \"38fec6d0-a4dc-45b4-a7fb-7a185ce174e4\") " pod="openstack/nova-cell1-db-create-8xcns" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.606872 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7867b3a-9ee2-4c8e-a401-7181a5c4a9da-operator-scripts\") pod \"nova-cell0-db-create-mpcv6\" (UID: \"f7867b3a-9ee2-4c8e-a401-7181a5c4a9da\") " pod="openstack/nova-cell0-db-create-mpcv6" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.616159 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-46nqn" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.650259 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fdbb7\" (UniqueName: \"kubernetes.io/projected/f7867b3a-9ee2-4c8e-a401-7181a5c4a9da-kube-api-access-fdbb7\") pod \"nova-cell0-db-create-mpcv6\" (UID: \"f7867b3a-9ee2-4c8e-a401-7181a5c4a9da\") " pod="openstack/nova-cell0-db-create-mpcv6" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.689593 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-c61d-account-create-update-n5zhs"] Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.691037 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-c61d-account-create-update-n5zhs" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.694634 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.697011 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-mpcv6" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.700562 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-c61d-account-create-update-n5zhs"] Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.712877 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-px662\" (UniqueName: \"kubernetes.io/projected/38fec6d0-a4dc-45b4-a7fb-7a185ce174e4-kube-api-access-px662\") pod \"nova-cell1-db-create-8xcns\" (UID: \"38fec6d0-a4dc-45b4-a7fb-7a185ce174e4\") " pod="openstack/nova-cell1-db-create-8xcns" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.712995 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/956e4870-4475-4d8e-a0c2-0ffefcfcbb1f-operator-scripts\") pod \"nova-api-b649-account-create-update-htrrc\" (UID: \"956e4870-4475-4d8e-a0c2-0ffefcfcbb1f\") " pod="openstack/nova-api-b649-account-create-update-htrrc" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.713121 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38fec6d0-a4dc-45b4-a7fb-7a185ce174e4-operator-scripts\") pod \"nova-cell1-db-create-8xcns\" (UID: \"38fec6d0-a4dc-45b4-a7fb-7a185ce174e4\") " pod="openstack/nova-cell1-db-create-8xcns" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.713306 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvc9s\" (UniqueName: \"kubernetes.io/projected/956e4870-4475-4d8e-a0c2-0ffefcfcbb1f-kube-api-access-rvc9s\") pod \"nova-api-b649-account-create-update-htrrc\" (UID: \"956e4870-4475-4d8e-a0c2-0ffefcfcbb1f\") " pod="openstack/nova-api-b649-account-create-update-htrrc" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.714537 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/956e4870-4475-4d8e-a0c2-0ffefcfcbb1f-operator-scripts\") pod \"nova-api-b649-account-create-update-htrrc\" (UID: \"956e4870-4475-4d8e-a0c2-0ffefcfcbb1f\") " pod="openstack/nova-api-b649-account-create-update-htrrc" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.716932 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38fec6d0-a4dc-45b4-a7fb-7a185ce174e4-operator-scripts\") pod \"nova-cell1-db-create-8xcns\" (UID: \"38fec6d0-a4dc-45b4-a7fb-7a185ce174e4\") " pod="openstack/nova-cell1-db-create-8xcns" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.747652 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-px662\" (UniqueName: \"kubernetes.io/projected/38fec6d0-a4dc-45b4-a7fb-7a185ce174e4-kube-api-access-px662\") pod \"nova-cell1-db-create-8xcns\" (UID: \"38fec6d0-a4dc-45b4-a7fb-7a185ce174e4\") " pod="openstack/nova-cell1-db-create-8xcns" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.751030 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvc9s\" (UniqueName: \"kubernetes.io/projected/956e4870-4475-4d8e-a0c2-0ffefcfcbb1f-kube-api-access-rvc9s\") pod \"nova-api-b649-account-create-update-htrrc\" (UID: \"956e4870-4475-4d8e-a0c2-0ffefcfcbb1f\") " pod="openstack/nova-api-b649-account-create-update-htrrc" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.815031 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aea1974a-8997-47fe-9c50-26387876a96a-operator-scripts\") pod \"nova-cell0-c61d-account-create-update-n5zhs\" (UID: \"aea1974a-8997-47fe-9c50-26387876a96a\") " pod="openstack/nova-cell0-c61d-account-create-update-n5zhs" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.815104 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8nhq\" (UniqueName: \"kubernetes.io/projected/aea1974a-8997-47fe-9c50-26387876a96a-kube-api-access-h8nhq\") pod \"nova-cell0-c61d-account-create-update-n5zhs\" (UID: \"aea1974a-8997-47fe-9c50-26387876a96a\") " pod="openstack/nova-cell0-c61d-account-create-update-n5zhs" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.844037 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b649-account-create-update-htrrc" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.877157 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-e1a8-account-create-update-cms92"] Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.880071 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-e1a8-account-create-update-cms92" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.884977 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.886322 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-e1a8-account-create-update-cms92"] Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.904781 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-8xcns" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.916438 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aea1974a-8997-47fe-9c50-26387876a96a-operator-scripts\") pod \"nova-cell0-c61d-account-create-update-n5zhs\" (UID: \"aea1974a-8997-47fe-9c50-26387876a96a\") " pod="openstack/nova-cell0-c61d-account-create-update-n5zhs" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.916495 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8nhq\" (UniqueName: \"kubernetes.io/projected/aea1974a-8997-47fe-9c50-26387876a96a-kube-api-access-h8nhq\") pod \"nova-cell0-c61d-account-create-update-n5zhs\" (UID: \"aea1974a-8997-47fe-9c50-26387876a96a\") " pod="openstack/nova-cell0-c61d-account-create-update-n5zhs" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.916856 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c07df634-8325-4942-b8a1-7764cd036d1f-operator-scripts\") pod \"nova-cell1-e1a8-account-create-update-cms92\" (UID: \"c07df634-8325-4942-b8a1-7764cd036d1f\") " pod="openstack/nova-cell1-e1a8-account-create-update-cms92" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.916976 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28mqk\" (UniqueName: \"kubernetes.io/projected/c07df634-8325-4942-b8a1-7764cd036d1f-kube-api-access-28mqk\") pod \"nova-cell1-e1a8-account-create-update-cms92\" (UID: \"c07df634-8325-4942-b8a1-7764cd036d1f\") " pod="openstack/nova-cell1-e1a8-account-create-update-cms92" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.917194 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aea1974a-8997-47fe-9c50-26387876a96a-operator-scripts\") pod \"nova-cell0-c61d-account-create-update-n5zhs\" (UID: \"aea1974a-8997-47fe-9c50-26387876a96a\") " pod="openstack/nova-cell0-c61d-account-create-update-n5zhs" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.934355 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8nhq\" (UniqueName: \"kubernetes.io/projected/aea1974a-8997-47fe-9c50-26387876a96a-kube-api-access-h8nhq\") pod \"nova-cell0-c61d-account-create-update-n5zhs\" (UID: \"aea1974a-8997-47fe-9c50-26387876a96a\") " pod="openstack/nova-cell0-c61d-account-create-update-n5zhs" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.962857 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.962923 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.988553 4706 generic.go:334] "Generic (PLEG): container finished" 
podID="fe58f8d9-1234-4aab-9eec-6e2bad482002" containerID="408b2e7ba439a70138bf8da900757aff07d11b2f851d0db013299a0b38895e7f" exitCode=143 Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.988598 4706 generic.go:334] "Generic (PLEG): container finished" podID="fe58f8d9-1234-4aab-9eec-6e2bad482002" containerID="f3523dfa7e90a128a77e492e27b44740a88cec7e2e55cbd77e99fddb20e37054" exitCode=143 Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.988676 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fe58f8d9-1234-4aab-9eec-6e2bad482002","Type":"ContainerDied","Data":"408b2e7ba439a70138bf8da900757aff07d11b2f851d0db013299a0b38895e7f"} Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.988711 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fe58f8d9-1234-4aab-9eec-6e2bad482002","Type":"ContainerDied","Data":"f3523dfa7e90a128a77e492e27b44740a88cec7e2e55cbd77e99fddb20e37054"} Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.998285 4706 generic.go:334] "Generic (PLEG): container finished" podID="253e98d0-d2d8-42e4-a0ff-b4e6bb53036d" containerID="fef4867fdb51598c34511e38a1fea2ee0b5cdfa65f66aae9de3bec3c7a27a6de" exitCode=143 Dec 06 05:47:35 crc kubenswrapper[4706]: I1206 05:47:35.998330 4706 generic.go:334] "Generic (PLEG): container finished" podID="253e98d0-d2d8-42e4-a0ff-b4e6bb53036d" containerID="f280a5422ec69deb2c0e77890300be8e9237d9012597ec59ec79111e30969208" exitCode=143 Dec 06 05:47:36 crc kubenswrapper[4706]: I1206 05:47:35.998674 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d","Type":"ContainerDied","Data":"fef4867fdb51598c34511e38a1fea2ee0b5cdfa65f66aae9de3bec3c7a27a6de"} Dec 06 05:47:36 crc kubenswrapper[4706]: I1206 05:47:35.998748 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d","Type":"ContainerDied","Data":"f280a5422ec69deb2c0e77890300be8e9237d9012597ec59ec79111e30969208"} Dec 06 05:47:36 crc kubenswrapper[4706]: I1206 05:47:36.014340 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-c61d-account-create-update-n5zhs" Dec 06 05:47:36 crc kubenswrapper[4706]: I1206 05:47:36.021918 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28mqk\" (UniqueName: \"kubernetes.io/projected/c07df634-8325-4942-b8a1-7764cd036d1f-kube-api-access-28mqk\") pod \"nova-cell1-e1a8-account-create-update-cms92\" (UID: \"c07df634-8325-4942-b8a1-7764cd036d1f\") " pod="openstack/nova-cell1-e1a8-account-create-update-cms92" Dec 06 05:47:36 crc kubenswrapper[4706]: I1206 05:47:36.022194 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c07df634-8325-4942-b8a1-7764cd036d1f-operator-scripts\") pod \"nova-cell1-e1a8-account-create-update-cms92\" (UID: \"c07df634-8325-4942-b8a1-7764cd036d1f\") " pod="openstack/nova-cell1-e1a8-account-create-update-cms92" Dec 06 05:47:36 crc kubenswrapper[4706]: I1206 05:47:36.022934 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c07df634-8325-4942-b8a1-7764cd036d1f-operator-scripts\") pod \"nova-cell1-e1a8-account-create-update-cms92\" (UID: \"c07df634-8325-4942-b8a1-7764cd036d1f\") " pod="openstack/nova-cell1-e1a8-account-create-update-cms92" Dec 06 05:47:36 crc kubenswrapper[4706]: I1206 05:47:36.038571 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28mqk\" (UniqueName: \"kubernetes.io/projected/c07df634-8325-4942-b8a1-7764cd036d1f-kube-api-access-28mqk\") pod \"nova-cell1-e1a8-account-create-update-cms92\" (UID: \"c07df634-8325-4942-b8a1-7764cd036d1f\") " pod="openstack/nova-cell1-e1a8-account-create-update-cms92" Dec 06 05:47:36 crc kubenswrapper[4706]: I1206 05:47:36.204964 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-e1a8-account-create-update-cms92" Dec 06 05:47:36 crc kubenswrapper[4706]: I1206 05:47:36.809626 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-jtjm9" podUID="05c17326-c953-41d3-97ea-d620f5535013" containerName="registry-server" probeResult="failure" output=< Dec 06 05:47:36 crc kubenswrapper[4706]: timeout: failed to connect service ":50051" within 1s Dec 06 05:47:36 crc kubenswrapper[4706]: > Dec 06 05:47:39 crc kubenswrapper[4706]: I1206 05:47:39.072728 4706 generic.go:334] "Generic (PLEG): container finished" podID="1a29d8d3-8bd3-493f-b40c-89104b4d3a02" containerID="bbcda477d3fd9a6ff83b8f8fdf3485c2cf6162604ff6d9989953a968d572e160" exitCode=0 Dec 06 05:47:39 crc kubenswrapper[4706]: I1206 05:47:39.072785 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1a29d8d3-8bd3-493f-b40c-89104b4d3a02","Type":"ContainerDied","Data":"bbcda477d3fd9a6ff83b8f8fdf3485c2cf6162604ff6d9989953a968d572e160"} Dec 06 05:47:40 crc kubenswrapper[4706]: I1206 05:47:40.541322 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:47:40 crc kubenswrapper[4706]: I1206 05:47:40.642896 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-789c5c5cb7-pt598"] Dec 06 05:47:40 crc kubenswrapper[4706]: I1206 05:47:40.643203 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" podUID="9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb" containerName="dnsmasq-dns" containerID="cri-o://194625a5c5d0487f17a2cb5a08096844bbb56ebba4bc3708ca77c89ff6d83399" gracePeriod=10 Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.092942 4706 generic.go:334] "Generic (PLEG): container finished" podID="9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb" containerID="194625a5c5d0487f17a2cb5a08096844bbb56ebba4bc3708ca77c89ff6d83399" exitCode=0 Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.092995 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" event={"ID":"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb","Type":"ContainerDied","Data":"194625a5c5d0487f17a2cb5a08096844bbb56ebba4bc3708ca77c89ff6d83399"} Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.162915 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.590423 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.591303 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.713166 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.757693 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-config-data\") pod \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.757775 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fxl84\" (UniqueName: \"kubernetes.io/projected/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-kube-api-access-fxl84\") pod \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.757825 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-horizon-tls-certs\") pod \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.757841 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-combined-ca-bundle\") pod \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.757869 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-horizon-secret-key\") pod \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.757890 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-scripts\") pod \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.757937 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-logs\") pod \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\" (UID: \"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf\") " Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.758834 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-logs" (OuterVolumeSpecName: "logs") pod "ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" (UID: "ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.763163 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-kube-api-access-fxl84" (OuterVolumeSpecName: "kube-api-access-fxl84") pod "ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" (UID: "ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf"). InnerVolumeSpecName "kube-api-access-fxl84". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.763631 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" (UID: "ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.785126 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-config-data" (OuterVolumeSpecName: "config-data") pod "ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" (UID: "ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.790538 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" (UID: "ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.790660 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-scripts" (OuterVolumeSpecName: "scripts") pod "ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" (UID: "ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.834147 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" (UID: "ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.860124 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.860162 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fxl84\" (UniqueName: \"kubernetes.io/projected/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-kube-api-access-fxl84\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.860176 4706 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.860186 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.860195 4706 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.860208 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.860216 4706 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf-logs\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:41 crc kubenswrapper[4706]: I1206 05:47:41.891950 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-565566dfbd-5h6dj" Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.129507 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7f979b84f6-hzq85" event={"ID":"ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf","Type":"ContainerDied","Data":"2a4fd40e570cd26faddc1bca7446994984d187e221497c601ad93e2dca508d92"} Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.129570 4706 scope.go:117] "RemoveContainer" containerID="4690ec4d5b8ee63e514a007ab5967eb43caa526a3ad17a05261a250f9e8efa62" Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.129730 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-7f979b84f6-hzq85" Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.171178 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-7f979b84f6-hzq85"] Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.183392 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-7f979b84f6-hzq85"] Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.260624 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.382508 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-config\") pod \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.382792 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v4nh9\" (UniqueName: \"kubernetes.io/projected/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-kube-api-access-v4nh9\") pod \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.382832 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-dns-svc\") pod \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.382875 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-dns-swift-storage-0\") pod \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.382939 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-ovsdbserver-nb\") pod \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.383020 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-ovsdbserver-sb\") pod \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\" (UID: \"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb\") " Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.408444 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-kube-api-access-v4nh9" (OuterVolumeSpecName: "kube-api-access-v4nh9") pod "9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb" (UID: "9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb"). InnerVolumeSpecName "kube-api-access-v4nh9". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.457838 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb" (UID: "9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.491371 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.491793 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v4nh9\" (UniqueName: \"kubernetes.io/projected/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-kube-api-access-v4nh9\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.496981 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb" (UID: "9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.508276 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb" (UID: "9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.510415 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb" (UID: "9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.524923 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-config" (OuterVolumeSpecName: "config") pod "9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb" (UID: "9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.596520 4706 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.596556 4706 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.596568 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.596578 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.678720 4706 scope.go:117] "RemoveContainer" containerID="5a6bfebf319d4eb3f9bbfd4b2c9f92dbf46c782a519f3895cca98b4760ce3f3b" Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.877346 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 06 05:47:42 crc kubenswrapper[4706]: I1206 05:47:42.988245 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-mpcv6"] Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.005328 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-config-data-custom\") pod \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.005388 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-config-data\") pod \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.005421 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-scripts\") pod \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.005494 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-combined-ca-bundle\") pod \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.005531 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-etc-machine-id\") pod \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.005627 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nwv8s\" 
(UniqueName: \"kubernetes.io/projected/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-kube-api-access-nwv8s\") pod \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\" (UID: \"1a29d8d3-8bd3-493f-b40c-89104b4d3a02\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.012266 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-scripts" (OuterVolumeSpecName: "scripts") pod "1a29d8d3-8bd3-493f-b40c-89104b4d3a02" (UID: "1a29d8d3-8bd3-493f-b40c-89104b4d3a02"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.014340 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-kube-api-access-nwv8s" (OuterVolumeSpecName: "kube-api-access-nwv8s") pod "1a29d8d3-8bd3-493f-b40c-89104b4d3a02" (UID: "1a29d8d3-8bd3-493f-b40c-89104b4d3a02"). InnerVolumeSpecName "kube-api-access-nwv8s". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.015011 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "1a29d8d3-8bd3-493f-b40c-89104b4d3a02" (UID: "1a29d8d3-8bd3-493f-b40c-89104b4d3a02"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.017839 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1a29d8d3-8bd3-493f-b40c-89104b4d3a02" (UID: "1a29d8d3-8bd3-493f-b40c-89104b4d3a02"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.112372 4706 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-config-data-custom\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.112493 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.112633 4706 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-etc-machine-id\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.112690 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nwv8s\" (UniqueName: \"kubernetes.io/projected/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-kube-api-access-nwv8s\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.130232 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1a29d8d3-8bd3-493f-b40c-89104b4d3a02" (UID: "1a29d8d3-8bd3-493f-b40c-89104b4d3a02"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.147733 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" event={"ID":"9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb","Type":"ContainerDied","Data":"a41349888dd4856bee8d646221b5e8351270fb478eb1df8c953aa80a61524761"} Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.147782 4706 scope.go:117] "RemoveContainer" containerID="194625a5c5d0487f17a2cb5a08096844bbb56ebba4bc3708ca77c89ff6d83399" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.147877 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.158404 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"312c3de0-e931-4bf0-ae83-588410c22061","Type":"ContainerStarted","Data":"23a30859c2c49556a518ddbc641aeb0c4a216fb10e2b452546a9eec5ec4ec41f"} Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.161536 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-mpcv6" event={"ID":"f7867b3a-9ee2-4c8e-a401-7181a5c4a9da","Type":"ContainerStarted","Data":"717f9547432e46459a5dfab8d76728da1b89b012cd811e787cfdad8893de8740"} Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.170650 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.173676 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"1a29d8d3-8bd3-493f-b40c-89104b4d3a02","Type":"ContainerDied","Data":"51ed28cd2c78cf796733e4289cc238737fc9797d3a28f59bc7191f8d364e2f8e"} Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.196679 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-789c5c5cb7-pt598"] Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.214898 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-config-data" (OuterVolumeSpecName: "config-data") pod "1a29d8d3-8bd3-493f-b40c-89104b4d3a02" (UID: "1a29d8d3-8bd3-493f-b40c-89104b4d3a02"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.214960 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.216312 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-789c5c5cb7-pt598"] Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.236361 4706 scope.go:117] "RemoveContainer" containerID="bfb790d3faf672e0d647e9b2a8debec8a9273af06752ba604007d2ebb993eede" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.261210 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.307431 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.317333 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe58f8d9-1234-4aab-9eec-6e2bad482002-scripts\") pod \"fe58f8d9-1234-4aab-9eec-6e2bad482002\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.317541 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fe58f8d9-1234-4aab-9eec-6e2bad482002-httpd-run\") pod \"fe58f8d9-1234-4aab-9eec-6e2bad482002\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.317571 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe58f8d9-1234-4aab-9eec-6e2bad482002-logs\") pod \"fe58f8d9-1234-4aab-9eec-6e2bad482002\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.318034 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe58f8d9-1234-4aab-9eec-6e2bad482002-config-data\") pod \"fe58f8d9-1234-4aab-9eec-6e2bad482002\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.318126 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lvh7s\" (UniqueName: \"kubernetes.io/projected/fe58f8d9-1234-4aab-9eec-6e2bad482002-kube-api-access-lvh7s\") pod \"fe58f8d9-1234-4aab-9eec-6e2bad482002\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.318189 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"fe58f8d9-1234-4aab-9eec-6e2bad482002\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.318317 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe58f8d9-1234-4aab-9eec-6e2bad482002-combined-ca-bundle\") pod \"fe58f8d9-1234-4aab-9eec-6e2bad482002\" (UID: \"fe58f8d9-1234-4aab-9eec-6e2bad482002\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.318827 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1a29d8d3-8bd3-493f-b40c-89104b4d3a02-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.321166 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe58f8d9-1234-4aab-9eec-6e2bad482002-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "fe58f8d9-1234-4aab-9eec-6e2bad482002" (UID: "fe58f8d9-1234-4aab-9eec-6e2bad482002"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.321718 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe58f8d9-1234-4aab-9eec-6e2bad482002-logs" (OuterVolumeSpecName: "logs") pod "fe58f8d9-1234-4aab-9eec-6e2bad482002" (UID: "fe58f8d9-1234-4aab-9eec-6e2bad482002"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.322564 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe58f8d9-1234-4aab-9eec-6e2bad482002-scripts" (OuterVolumeSpecName: "scripts") pod "fe58f8d9-1234-4aab-9eec-6e2bad482002" (UID: "fe58f8d9-1234-4aab-9eec-6e2bad482002"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.336142 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "glance") pod "fe58f8d9-1234-4aab-9eec-6e2bad482002" (UID: "fe58f8d9-1234-4aab-9eec-6e2bad482002"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.342333 4706 scope.go:117] "RemoveContainer" containerID="c0613fb5cf6d378604fa3a0322447e04b36d8f54a054a76970c08d0d62e9b661" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.348514 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe58f8d9-1234-4aab-9eec-6e2bad482002-kube-api-access-lvh7s" (OuterVolumeSpecName: "kube-api-access-lvh7s") pod "fe58f8d9-1234-4aab-9eec-6e2bad482002" (UID: "fe58f8d9-1234-4aab-9eec-6e2bad482002"). InnerVolumeSpecName "kube-api-access-lvh7s". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.376437 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe58f8d9-1234-4aab-9eec-6e2bad482002-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fe58f8d9-1234-4aab-9eec-6e2bad482002" (UID: "fe58f8d9-1234-4aab-9eec-6e2bad482002"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.379316 4706 scope.go:117] "RemoveContainer" containerID="bbcda477d3fd9a6ff83b8f8fdf3485c2cf6162604ff6d9989953a968d572e160" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.379846 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.380440 4706 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.386469 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe58f8d9-1234-4aab-9eec-6e2bad482002-config-data" (OuterVolumeSpecName: "config-data") pod "fe58f8d9-1234-4aab-9eec-6e2bad482002" (UID: "fe58f8d9-1234-4aab-9eec-6e2bad482002"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.398556 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-789868f976-vz5nh" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.422185 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gt52z\" (UniqueName: \"kubernetes.io/projected/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-kube-api-access-gt52z\") pod \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.422346 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.422438 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-scripts\") pod \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.422483 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-logs\") pod \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.422506 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-combined-ca-bundle\") pod \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.422560 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-httpd-run\") pod \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.422601 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-config-data\") pod \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\" (UID: \"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d\") " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.422955 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe58f8d9-1234-4aab-9eec-6e2bad482002-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.422974 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fe58f8d9-1234-4aab-9eec-6e2bad482002-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.422983 4706 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fe58f8d9-1234-4aab-9eec-6e2bad482002-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.422993 4706 reconciler_common.go:293] "Volume detached for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/fe58f8d9-1234-4aab-9eec-6e2bad482002-logs\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.423001 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe58f8d9-1234-4aab-9eec-6e2bad482002-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.423010 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lvh7s\" (UniqueName: \"kubernetes.io/projected/fe58f8d9-1234-4aab-9eec-6e2bad482002-kube-api-access-lvh7s\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.423027 4706 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.423150 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-logs" (OuterVolumeSpecName: "logs") pod "253e98d0-d2d8-42e4-a0ff-b4e6bb53036d" (UID: "253e98d0-d2d8-42e4-a0ff-b4e6bb53036d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.423519 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "253e98d0-d2d8-42e4-a0ff-b4e6bb53036d" (UID: "253e98d0-d2d8-42e4-a0ff-b4e6bb53036d"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.431915 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-kube-api-access-gt52z" (OuterVolumeSpecName: "kube-api-access-gt52z") pod "253e98d0-d2d8-42e4-a0ff-b4e6bb53036d" (UID: "253e98d0-d2d8-42e4-a0ff-b4e6bb53036d"). InnerVolumeSpecName "kube-api-access-gt52z". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.442234 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-scripts" (OuterVolumeSpecName: "scripts") pod "253e98d0-d2d8-42e4-a0ff-b4e6bb53036d" (UID: "253e98d0-d2d8-42e4-a0ff-b4e6bb53036d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.444449 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "glance") pod "253e98d0-d2d8-42e4-a0ff-b4e6bb53036d" (UID: "253e98d0-d2d8-42e4-a0ff-b4e6bb53036d"). InnerVolumeSpecName "local-storage05-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.511524 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-46nqn"] Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.511878 4706 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.526419 4706 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.526595 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.526606 4706 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-logs\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.526614 4706 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-httpd-run\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.526622 4706 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.526631 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gt52z\" (UniqueName: \"kubernetes.io/projected/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-kube-api-access-gt52z\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.551815 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-config-data" (OuterVolumeSpecName: "config-data") pod "253e98d0-d2d8-42e4-a0ff-b4e6bb53036d" (UID: "253e98d0-d2d8-42e4-a0ff-b4e6bb53036d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.573452 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-e1a8-account-create-update-cms92"] Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.599169 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "253e98d0-d2d8-42e4-a0ff-b4e6bb53036d" (UID: "253e98d0-d2d8-42e4-a0ff-b4e6bb53036d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.600927 4706 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.617195 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.632676 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.632699 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.632710 4706 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.788533 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.865852 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.881172 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.881388 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.902570 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Dec 06 05:47:43 crc kubenswrapper[4706]: E1206 05:47:43.902917 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb" containerName="init" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.902936 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb" containerName="init" Dec 06 05:47:43 crc kubenswrapper[4706]: E1206 05:47:43.902947 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe58f8d9-1234-4aab-9eec-6e2bad482002" containerName="glance-log" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.902953 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe58f8d9-1234-4aab-9eec-6e2bad482002" containerName="glance-log" Dec 06 05:47:43 crc kubenswrapper[4706]: E1206 05:47:43.902974 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="253e98d0-d2d8-42e4-a0ff-b4e6bb53036d" containerName="glance-log" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.902980 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="253e98d0-d2d8-42e4-a0ff-b4e6bb53036d" containerName="glance-log" Dec 06 05:47:43 crc kubenswrapper[4706]: E1206 05:47:43.902992 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="253e98d0-d2d8-42e4-a0ff-b4e6bb53036d" containerName="glance-httpd" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.902998 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="253e98d0-d2d8-42e4-a0ff-b4e6bb53036d" containerName="glance-httpd" Dec 
06 05:47:43 crc kubenswrapper[4706]: E1206 05:47:43.903012 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe58f8d9-1234-4aab-9eec-6e2bad482002" containerName="glance-httpd" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.903018 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe58f8d9-1234-4aab-9eec-6e2bad482002" containerName="glance-httpd" Dec 06 05:47:43 crc kubenswrapper[4706]: E1206 05:47:43.903029 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" containerName="horizon" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.903034 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" containerName="horizon" Dec 06 05:47:43 crc kubenswrapper[4706]: E1206 05:47:43.903076 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" containerName="horizon-log" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.903084 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" containerName="horizon-log" Dec 06 05:47:43 crc kubenswrapper[4706]: E1206 05:47:43.903100 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a29d8d3-8bd3-493f-b40c-89104b4d3a02" containerName="probe" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.903106 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a29d8d3-8bd3-493f-b40c-89104b4d3a02" containerName="probe" Dec 06 05:47:43 crc kubenswrapper[4706]: E1206 05:47:43.903114 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a29d8d3-8bd3-493f-b40c-89104b4d3a02" containerName="cinder-scheduler" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.903120 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a29d8d3-8bd3-493f-b40c-89104b4d3a02" containerName="cinder-scheduler" Dec 06 05:47:43 crc kubenswrapper[4706]: E1206 05:47:43.903132 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb" containerName="dnsmasq-dns" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.903137 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb" containerName="dnsmasq-dns" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.903295 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a29d8d3-8bd3-493f-b40c-89104b4d3a02" containerName="cinder-scheduler" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.903307 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe58f8d9-1234-4aab-9eec-6e2bad482002" containerName="glance-httpd" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.903320 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="253e98d0-d2d8-42e4-a0ff-b4e6bb53036d" containerName="glance-httpd" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.903332 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb" containerName="dnsmasq-dns" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.903340 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" containerName="horizon-log" Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.903349 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe58f8d9-1234-4aab-9eec-6e2bad482002" containerName="glance-log" Dec 06 05:47:43 crc 
Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.903359 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a29d8d3-8bd3-493f-b40c-89104b4d3a02" containerName="probe"
Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.903366 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="253e98d0-d2d8-42e4-a0ff-b4e6bb53036d" containerName="glance-log"
Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.903377 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" containerName="horizon"
Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.904474 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.908524 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.928790 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-b649-account-create-update-htrrc"]
Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.940288 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-c61d-account-create-update-n5zhs"]
Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.949017 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.962829 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-8xcns"]
Dec 06 05:47:43 crc kubenswrapper[4706]: I1206 05:47:43.985953 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-7f979b84f6-hzq85" podUID="ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.142:8443/dashboard/auth/login/?next=/dashboard/\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
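The horizon readiness probe above fails with a client-side timeout because the pod is mid-teardown and no longer answering. The error string comes from Go's net/http package: an http.Client with an overall Timeout cancels the request if response headers do not arrive in time. A minimal sketch reproducing that error class (the URL is the probe endpoint from the log and only resolves inside that cluster; the one-second timeout is an assumption, not the probe's configured value):

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    func main() {
        client := &http.Client{Timeout: 1 * time.Second}
        resp, err := client.Get("https://10.217.0.142:8443/dashboard/auth/login/?next=/dashboard/")
        if err != nil {
            // Against an unreachable endpoint this yields the same
            // "Client.Timeout exceeded while awaiting headers" wording
            // seen in the probe output above.
            fmt.Println("probe failed:", err)
            return
        }
        defer resp.Body.Close()
        fmt.Println("probe ok:", resp.Status)
    }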
pod="openstack/cinder-scheduler-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.042797 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9823a9c2-7e13-4c23-a9ea-af6e03c32773-config-data\") pod \"cinder-scheduler-0\" (UID: \"9823a9c2-7e13-4c23-a9ea-af6e03c32773\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.042828 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9823a9c2-7e13-4c23-a9ea-af6e03c32773-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"9823a9c2-7e13-4c23-a9ea-af6e03c32773\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.057358 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a29d8d3-8bd3-493f-b40c-89104b4d3a02" path="/var/lib/kubelet/pods/1a29d8d3-8bd3-493f-b40c-89104b4d3a02/volumes" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.058032 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb" path="/var/lib/kubelet/pods/9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb/volumes" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.062764 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf" path="/var/lib/kubelet/pods/ea28a9b9-eda7-4e4a-83d1-ebfac91fa4bf/volumes" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.144984 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9823a9c2-7e13-4c23-a9ea-af6e03c32773-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"9823a9c2-7e13-4c23-a9ea-af6e03c32773\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.145033 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9823a9c2-7e13-4c23-a9ea-af6e03c32773-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"9823a9c2-7e13-4c23-a9ea-af6e03c32773\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.145108 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9823a9c2-7e13-4c23-a9ea-af6e03c32773-scripts\") pod \"cinder-scheduler-0\" (UID: \"9823a9c2-7e13-4c23-a9ea-af6e03c32773\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.145139 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9823a9c2-7e13-4c23-a9ea-af6e03c32773-config-data\") pod \"cinder-scheduler-0\" (UID: \"9823a9c2-7e13-4c23-a9ea-af6e03c32773\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.145184 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9823a9c2-7e13-4c23-a9ea-af6e03c32773-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"9823a9c2-7e13-4c23-a9ea-af6e03c32773\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.145242 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvnc5\" 
(UniqueName: \"kubernetes.io/projected/9823a9c2-7e13-4c23-a9ea-af6e03c32773-kube-api-access-qvnc5\") pod \"cinder-scheduler-0\" (UID: \"9823a9c2-7e13-4c23-a9ea-af6e03c32773\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.145577 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9823a9c2-7e13-4c23-a9ea-af6e03c32773-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"9823a9c2-7e13-4c23-a9ea-af6e03c32773\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.149895 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9823a9c2-7e13-4c23-a9ea-af6e03c32773-config-data\") pod \"cinder-scheduler-0\" (UID: \"9823a9c2-7e13-4c23-a9ea-af6e03c32773\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.150464 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9823a9c2-7e13-4c23-a9ea-af6e03c32773-scripts\") pod \"cinder-scheduler-0\" (UID: \"9823a9c2-7e13-4c23-a9ea-af6e03c32773\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.155181 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9823a9c2-7e13-4c23-a9ea-af6e03c32773-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"9823a9c2-7e13-4c23-a9ea-af6e03c32773\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.160605 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9823a9c2-7e13-4c23-a9ea-af6e03c32773-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"9823a9c2-7e13-4c23-a9ea-af6e03c32773\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.165957 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvnc5\" (UniqueName: \"kubernetes.io/projected/9823a9c2-7e13-4c23-a9ea-af6e03c32773-kube-api-access-qvnc5\") pod \"cinder-scheduler-0\" (UID: \"9823a9c2-7e13-4c23-a9ea-af6e03c32773\") " pod="openstack/cinder-scheduler-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.191549 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-c61d-account-create-update-n5zhs" event={"ID":"aea1974a-8997-47fe-9c50-26387876a96a","Type":"ContainerStarted","Data":"2c8ea62b180663b4966913e46d55e3ab4060405d9d078ef09d4b10e84e6bff44"} Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.196692 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-e1a8-account-create-update-cms92" event={"ID":"c07df634-8325-4942-b8a1-7764cd036d1f","Type":"ContainerStarted","Data":"f9b7e5d75b2c7ac097e516286787364f9bf12f4d3ce099e401939c6908283664"} Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.217615 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b649-account-create-update-htrrc" event={"ID":"956e4870-4475-4d8e-a0c2-0ffefcfcbb1f","Type":"ContainerStarted","Data":"b1cef3752cca7bd5adfbf81ce99832da853345bbf39807e97a589227b0074e85"} Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.227092 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.227593 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fe58f8d9-1234-4aab-9eec-6e2bad482002","Type":"ContainerDied","Data":"4cc059581b4b52906d36236556ab96beebc94db2957d35847768a60243a8cd7b"} Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.227645 4706 scope.go:117] "RemoveContainer" containerID="408b2e7ba439a70138bf8da900757aff07d11b2f851d0db013299a0b38895e7f" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.228298 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.238279 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"253e98d0-d2d8-42e4-a0ff-b4e6bb53036d","Type":"ContainerDied","Data":"82a1bd9d74e600e9ae6443fe92519d1272a966d1f1aec3509aa608efdf0a22a4"} Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.238423 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.264741 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.276381 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-mpcv6" event={"ID":"f7867b3a-9ee2-4c8e-a401-7181a5c4a9da","Type":"ContainerStarted","Data":"9eb7527fd6a16530ec86a0b09cf651ea3e82aa77480121303f71b0dcc3752223"} Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.283028 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.288615 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"2d5b5a38-b853-47de-ada1-1d7c240e84e4","Type":"ContainerStarted","Data":"434f21175efceb8743362450527aa5bda10ea003d861a968ec89877e3f33ab7a"} Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.293466 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-8xcns" event={"ID":"38fec6d0-a4dc-45b4-a7fb-7a185ce174e4","Type":"ContainerStarted","Data":"3992e5923d51091a4f45751a994fcd24e80a450b545f8309e1f3f4c6221c0151"} Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.294823 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.306525 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-46nqn" event={"ID":"f16bb998-03b9-4bd9-93d4-9965fd119d32","Type":"ContainerStarted","Data":"bc8657f6bc16337a1a3e9375dbfc8419270ee6ad703c397abddd45233f6ca533"} Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.335662 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.347208 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.351369 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.355282 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.355659 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.355883 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.356004 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-mw9mc" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.390295 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.393279 4706 scope.go:117] "RemoveContainer" containerID="f3523dfa7e90a128a77e492e27b44740a88cec7e2e55cbd77e99fddb20e37054" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.408455 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.410184 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.413032 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.413239 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.413354 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-mpcv6" podStartSLOduration=9.413336486 podStartE2EDuration="9.413336486s" podCreationTimestamp="2025-12-06 05:47:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:44.296671275 +0000 UTC m=+1686.624495219" watchObservedRunningTime="2025-12-06 05:47:44.413336486 +0000 UTC m=+1686.741160420" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.450100 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-logs\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.450484 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.450535 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " 
pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.450561 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.450589 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sq2ld\" (UniqueName: \"kubernetes.io/projected/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-kube-api-access-sq2ld\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.450608 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.450638 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.450657 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.482817 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.526694 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=4.137674322 podStartE2EDuration="25.526671248s" podCreationTimestamp="2025-12-06 05:47:19 +0000 UTC" firstStartedPulling="2025-12-06 05:47:20.939137301 +0000 UTC m=+1663.266961245" lastFinishedPulling="2025-12-06 05:47:42.328134237 +0000 UTC m=+1684.655958171" observedRunningTime="2025-12-06 05:47:44.318539375 +0000 UTC m=+1686.646363329" watchObservedRunningTime="2025-12-06 05:47:44.526671248 +0000 UTC m=+1686.854495192" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.553189 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.553245 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c50f78da-9727-4908-ba76-4a3dbc4455c7-scripts\") pod \"glance-default-external-api-0\" (UID: 
\"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.553281 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.553313 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.553338 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sq2ld\" (UniqueName: \"kubernetes.io/projected/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-kube-api-access-sq2ld\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.553362 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.553390 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c50f78da-9727-4908-ba76-4a3dbc4455c7-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.553413 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c50f78da-9727-4908-ba76-4a3dbc4455c7-logs\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.553441 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.553465 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qhdhv\" (UniqueName: \"kubernetes.io/projected/c50f78da-9727-4908-ba76-4a3dbc4455c7-kube-api-access-qhdhv\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.553485 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: 
\"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.553519 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.553549 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c50f78da-9727-4908-ba76-4a3dbc4455c7-config-data\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.553591 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c50f78da-9727-4908-ba76-4a3dbc4455c7-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.553623 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c50f78da-9727-4908-ba76-4a3dbc4455c7-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.553654 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-logs\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.554132 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-logs\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.554380 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.556485 4706 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.574734 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " 
pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.583922 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.585604 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sq2ld\" (UniqueName: \"kubernetes.io/projected/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-kube-api-access-sq2ld\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.591698 4706 scope.go:117] "RemoveContainer" containerID="fef4867fdb51598c34511e38a1fea2ee0b5cdfa65f66aae9de3bec3c7a27a6de" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.596041 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.617110 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fdfe9ea0-e897-4071-9b1c-dcdd908b549d-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.658095 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c50f78da-9727-4908-ba76-4a3dbc4455c7-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.658196 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c50f78da-9727-4908-ba76-4a3dbc4455c7-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.658252 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c50f78da-9727-4908-ba76-4a3dbc4455c7-scripts\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.658316 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c50f78da-9727-4908-ba76-4a3dbc4455c7-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.658334 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c50f78da-9727-4908-ba76-4a3dbc4455c7-logs\") 
pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.658362 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qhdhv\" (UniqueName: \"kubernetes.io/projected/c50f78da-9727-4908-ba76-4a3dbc4455c7-kube-api-access-qhdhv\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.658390 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.658411 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c50f78da-9727-4908-ba76-4a3dbc4455c7-config-data\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.667101 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c50f78da-9727-4908-ba76-4a3dbc4455c7-logs\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.667826 4706 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.680548 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c50f78da-9727-4908-ba76-4a3dbc4455c7-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.681786 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c50f78da-9727-4908-ba76-4a3dbc4455c7-config-data\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.690661 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/c50f78da-9727-4908-ba76-4a3dbc4455c7-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.707076 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c50f78da-9727-4908-ba76-4a3dbc4455c7-scripts\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0" 
Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.715323 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c50f78da-9727-4908-ba76-4a3dbc4455c7-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0"
Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.715851 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-internal-api-0\" (UID: \"fdfe9ea0-e897-4071-9b1c-dcdd908b549d\") " pod="openstack/glance-default-internal-api-0"
Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.725328 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.731170 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qhdhv\" (UniqueName: \"kubernetes.io/projected/c50f78da-9727-4908-ba76-4a3dbc4455c7-kube-api-access-qhdhv\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0"
Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.784172 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"glance-default-external-api-0\" (UID: \"c50f78da-9727-4908-ba76-4a3dbc4455c7\") " pod="openstack/glance-default-external-api-0"
Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.821078 4706 scope.go:117] "RemoveContainer" containerID="f280a5422ec69deb2c0e77890300be8e9237d9012597ec59ec79111e30969208"
Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.836915 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.843205 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5c45f4d87f-7sd44"
Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.920616 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-565566dfbd-5h6dj"]
Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.920987 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-565566dfbd-5h6dj" podUID="4e512237-f0a0-4312-900f-5f8cd066f34c" containerName="neutron-httpd" containerID="cri-o://31d2454ebb8f95d79b52c6dcf0f9c935403cc845a8f5603a0da4e80479ae77e6" gracePeriod=30
Dec 06 05:47:44 crc kubenswrapper[4706]: I1206 05:47:44.920935 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-565566dfbd-5h6dj" podUID="4e512237-f0a0-4312-900f-5f8cd066f34c" containerName="neutron-api" containerID="cri-o://0fa82b6274e7874b77818c54894c836b4c1189bff7d2786933786e37292def0d" gracePeriod=30
Dec 06 05:47:45 crc kubenswrapper[4706]: I1206 05:47:45.104541 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Dec 06 05:47:45 crc kubenswrapper[4706]: I1206 05:47:45.338039 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-c61d-account-create-update-n5zhs" event={"ID":"aea1974a-8997-47fe-9c50-26387876a96a","Type":"ContainerStarted","Data":"4e5fa7c61a33dcba81caa08304cd477c9adc99b2f1ccc89c3a3eee8d9e72b64e"}
Dec 06 05:47:45 crc kubenswrapper[4706]: I1206 05:47:45.344240 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-8xcns" event={"ID":"38fec6d0-a4dc-45b4-a7fb-7a185ce174e4","Type":"ContainerStarted","Data":"d93d5832287bcfef20e6d3118eb35f0b99cd0ea998cac2818de586f05efa6e83"}
Dec 06 05:47:45 crc kubenswrapper[4706]: I1206 05:47:45.349283 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-46nqn" event={"ID":"f16bb998-03b9-4bd9-93d4-9965fd119d32","Type":"ContainerStarted","Data":"06cfd71be6b79ca7c45f10048f870337836e1de93d78172a3304daa3b5743981"}
Dec 06 05:47:45 crc kubenswrapper[4706]: I1206 05:47:45.357148 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-c61d-account-create-update-n5zhs" podStartSLOduration=10.357131632 podStartE2EDuration="10.357131632s" podCreationTimestamp="2025-12-06 05:47:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:45.351573902 +0000 UTC m=+1687.679397866" watchObservedRunningTime="2025-12-06 05:47:45.357131632 +0000 UTC m=+1687.684955566"
Dec 06 05:47:45 crc kubenswrapper[4706]: I1206 05:47:45.374133 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-db-create-8xcns" podStartSLOduration=10.374118031 podStartE2EDuration="10.374118031s" podCreationTimestamp="2025-12-06 05:47:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:45.365163258 +0000 UTC m=+1687.692987222" watchObservedRunningTime="2025-12-06 05:47:45.374118031 +0000 UTC m=+1687.701941975"
Dec 06 05:47:45 crc kubenswrapper[4706]: I1206 05:47:45.393178 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b649-account-create-update-htrrc" event={"ID":"956e4870-4475-4d8e-a0c2-0ffefcfcbb1f","Type":"ContainerStarted","Data":"124793da4ee8398b9eaa609f740ad8ed52568cb7727619a5b3c02ab729c97906"}
Dec 06 05:47:45 crc kubenswrapper[4706]: I1206 05:47:45.398116 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-46nqn" podStartSLOduration=10.398097238 podStartE2EDuration="10.398097238s" podCreationTimestamp="2025-12-06 05:47:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:45.389169237 +0000 UTC m=+1687.716993201" watchObservedRunningTime="2025-12-06 05:47:45.398097238 +0000 UTC m=+1687.725921182"
Dec 06 05:47:45 crc kubenswrapper[4706]: I1206 05:47:45.401637 4706 generic.go:334] "Generic (PLEG): container finished" podID="f7867b3a-9ee2-4c8e-a401-7181a5c4a9da" containerID="9eb7527fd6a16530ec86a0b09cf651ea3e82aa77480121303f71b0dcc3752223" exitCode=0
Dec 06 05:47:45 crc kubenswrapper[4706]: I1206 05:47:45.401705 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-mpcv6" event={"ID":"f7867b3a-9ee2-4c8e-a401-7181a5c4a9da","Type":"ContainerDied","Data":"9eb7527fd6a16530ec86a0b09cf651ea3e82aa77480121303f71b0dcc3752223"}
Dec 06 05:47:45 crc kubenswrapper[4706]: I1206 05:47:45.406013 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9823a9c2-7e13-4c23-a9ea-af6e03c32773","Type":"ContainerStarted","Data":"78c31368a9c5391bacfd16829d72b2a6d8f27da6176aedf5b48edcc8f6a71761"}
Dec 06 05:47:45 crc kubenswrapper[4706]: I1206 05:47:45.416464 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"312c3de0-e931-4bf0-ae83-588410c22061","Type":"ContainerStarted","Data":"522917d6dbf674c02bff7cb1487a46a8823b4691a92be658ffc70bdae0972afe"}
Dec 06 05:47:45 crc kubenswrapper[4706]: I1206 05:47:45.419173 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-e1a8-account-create-update-cms92" event={"ID":"c07df634-8325-4942-b8a1-7764cd036d1f","Type":"ContainerStarted","Data":"071f148d3d13ca4f06eb329307ee9036ac0873e1ebccdfef816cc464ff4d7602"}
Dec 06 05:47:45 crc kubenswrapper[4706]: I1206 05:47:45.421465 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-b649-account-create-update-htrrc" podStartSLOduration=10.421445009 podStartE2EDuration="10.421445009s" podCreationTimestamp="2025-12-06 05:47:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:45.41073075 +0000 UTC m=+1687.738554714" watchObservedRunningTime="2025-12-06 05:47:45.421445009 +0000 UTC m=+1687.749268953"
Dec 06 05:47:45 crc kubenswrapper[4706]: I1206 05:47:45.493232 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-e1a8-account-create-update-cms92" podStartSLOduration=10.493212628 podStartE2EDuration="10.493212628s" podCreationTimestamp="2025-12-06 05:47:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:45.446483755 +0000 UTC m=+1687.774307689" watchObservedRunningTime="2025-12-06 05:47:45.493212628 +0000 UTC m=+1687.821036572"
source="api" pods=["openstack/glance-default-internal-api-0"] Dec 06 05:47:45 crc kubenswrapper[4706]: W1206 05:47:45.634733 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfdfe9ea0_e897_4071_9b1c_dcdd908b549d.slice/crio-a47fa6cedca9dcfd673d7f713f1b670d17b0cbdf77c36aaece5f2bb30835653a WatchSource:0}: Error finding container a47fa6cedca9dcfd673d7f713f1b670d17b0cbdf77c36aaece5f2bb30835653a: Status 404 returned error can't find the container with id a47fa6cedca9dcfd673d7f713f1b670d17b0cbdf77c36aaece5f2bb30835653a Dec 06 05:47:45 crc kubenswrapper[4706]: I1206 05:47:45.812912 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Dec 06 05:47:45 crc kubenswrapper[4706]: I1206 05:47:45.844012 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jtjm9" Dec 06 05:47:45 crc kubenswrapper[4706]: I1206 05:47:45.930872 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jtjm9" Dec 06 05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.052784 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="253e98d0-d2d8-42e4-a0ff-b4e6bb53036d" path="/var/lib/kubelet/pods/253e98d0-d2d8-42e4-a0ff-b4e6bb53036d/volumes" Dec 06 05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.054180 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe58f8d9-1234-4aab-9eec-6e2bad482002" path="/var/lib/kubelet/pods/fe58f8d9-1234-4aab-9eec-6e2bad482002/volumes" Dec 06 05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.082232 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jtjm9"] Dec 06 05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.474666 4706 generic.go:334] "Generic (PLEG): container finished" podID="c07df634-8325-4942-b8a1-7764cd036d1f" containerID="071f148d3d13ca4f06eb329307ee9036ac0873e1ebccdfef816cc464ff4d7602" exitCode=0 Dec 06 05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.477621 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-e1a8-account-create-update-cms92" event={"ID":"c07df634-8325-4942-b8a1-7764cd036d1f","Type":"ContainerDied","Data":"071f148d3d13ca4f06eb329307ee9036ac0873e1ebccdfef816cc464ff4d7602"} Dec 06 05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.495351 4706 generic.go:334] "Generic (PLEG): container finished" podID="aea1974a-8997-47fe-9c50-26387876a96a" containerID="4e5fa7c61a33dcba81caa08304cd477c9adc99b2f1ccc89c3a3eee8d9e72b64e" exitCode=0 Dec 06 05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.495471 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-c61d-account-create-update-n5zhs" event={"ID":"aea1974a-8997-47fe-9c50-26387876a96a","Type":"ContainerDied","Data":"4e5fa7c61a33dcba81caa08304cd477c9adc99b2f1ccc89c3a3eee8d9e72b64e"} Dec 06 05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.502173 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9823a9c2-7e13-4c23-a9ea-af6e03c32773","Type":"ContainerStarted","Data":"dfc4cd6e44655180f5301da5f2739b30a0a9faea4ad30f8cf2f0221a3817f5c8"} Dec 06 05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.528783 4706 generic.go:334] "Generic (PLEG): container finished" podID="4e512237-f0a0-4312-900f-5f8cd066f34c" containerID="31d2454ebb8f95d79b52c6dcf0f9c935403cc845a8f5603a0da4e80479ae77e6" exitCode=0 Dec 06 
05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.528859 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-565566dfbd-5h6dj" event={"ID":"4e512237-f0a0-4312-900f-5f8cd066f34c","Type":"ContainerDied","Data":"31d2454ebb8f95d79b52c6dcf0f9c935403cc845a8f5603a0da4e80479ae77e6"} Dec 06 05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.536416 4706 generic.go:334] "Generic (PLEG): container finished" podID="956e4870-4475-4d8e-a0c2-0ffefcfcbb1f" containerID="124793da4ee8398b9eaa609f740ad8ed52568cb7727619a5b3c02ab729c97906" exitCode=0 Dec 06 05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.536477 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b649-account-create-update-htrrc" event={"ID":"956e4870-4475-4d8e-a0c2-0ffefcfcbb1f","Type":"ContainerDied","Data":"124793da4ee8398b9eaa609f740ad8ed52568cb7727619a5b3c02ab729c97906"} Dec 06 05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.539546 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c50f78da-9727-4908-ba76-4a3dbc4455c7","Type":"ContainerStarted","Data":"e7f1a3bbc1e7fa4cde0e2e72eb91524112737a3e2a6c255a0282382b380117b6"} Dec 06 05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.549463 4706 generic.go:334] "Generic (PLEG): container finished" podID="38fec6d0-a4dc-45b4-a7fb-7a185ce174e4" containerID="d93d5832287bcfef20e6d3118eb35f0b99cd0ea998cac2818de586f05efa6e83" exitCode=0 Dec 06 05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.549518 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-8xcns" event={"ID":"38fec6d0-a4dc-45b4-a7fb-7a185ce174e4","Type":"ContainerDied","Data":"d93d5832287bcfef20e6d3118eb35f0b99cd0ea998cac2818de586f05efa6e83"} Dec 06 05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.569214 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fdfe9ea0-e897-4071-9b1c-dcdd908b549d","Type":"ContainerStarted","Data":"a47fa6cedca9dcfd673d7f713f1b670d17b0cbdf77c36aaece5f2bb30835653a"} Dec 06 05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.572282 4706 generic.go:334] "Generic (PLEG): container finished" podID="f16bb998-03b9-4bd9-93d4-9965fd119d32" containerID="06cfd71be6b79ca7c45f10048f870337836e1de93d78172a3304daa3b5743981" exitCode=0 Dec 06 05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.572346 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-46nqn" event={"ID":"f16bb998-03b9-4bd9-93d4-9965fd119d32","Type":"ContainerDied","Data":"06cfd71be6b79ca7c45f10048f870337836e1de93d78172a3304daa3b5743981"} Dec 06 05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.576957 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"312c3de0-e931-4bf0-ae83-588410c22061","Type":"ContainerStarted","Data":"3d9dee6ddb4f2552cf77c605768981fec6ef094cc02937e23efa5652873032ce"} Dec 06 05:47:46 crc kubenswrapper[4706]: I1206 05:47:46.715493 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-789c5c5cb7-pt598" podUID="9a30a8df-fdae-4d26-8ff6-b2a6ca5895cb" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.155:5353: i/o timeout" Dec 06 05:47:47 crc kubenswrapper[4706]: I1206 05:47:47.050247 4706 util.go:48] "No ready sandbox for pod can be found. 
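The prober.go entry just above reports the dnsmasq-dns readiness failure as a plain TCP dial timing out against 10.217.0.155:5353. A minimal Go sketch of that style of check; the probe's actual type and timeout are not shown in the log, so the 2-second deadline here is an assumption:

package main

import (
	"fmt"
	"net"
	"time"
)

// probeTCP performs the bare connect-then-close that a TCP-style readiness
// check amounts to; any dial error (including i/o timeout) fails the probe.
func probeTCP(addr string, timeout time.Duration) error {
	conn, err := net.DialTimeout("tcp", addr, timeout)
	if err != nil {
		return err // e.g. "dial tcp 10.217.0.155:5353: i/o timeout"
	}
	return conn.Close()
}

func main() {
	if err := probeTCP("10.217.0.155:5353", 2*time.Second); err != nil {
		fmt.Println("probe failed:", err)
	}
}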
Need to start a new one" pod="openstack/nova-cell0-db-create-mpcv6" Dec 06 05:47:47 crc kubenswrapper[4706]: I1206 05:47:47.226035 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7867b3a-9ee2-4c8e-a401-7181a5c4a9da-operator-scripts\") pod \"f7867b3a-9ee2-4c8e-a401-7181a5c4a9da\" (UID: \"f7867b3a-9ee2-4c8e-a401-7181a5c4a9da\") " Dec 06 05:47:47 crc kubenswrapper[4706]: I1206 05:47:47.226141 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fdbb7\" (UniqueName: \"kubernetes.io/projected/f7867b3a-9ee2-4c8e-a401-7181a5c4a9da-kube-api-access-fdbb7\") pod \"f7867b3a-9ee2-4c8e-a401-7181a5c4a9da\" (UID: \"f7867b3a-9ee2-4c8e-a401-7181a5c4a9da\") " Dec 06 05:47:47 crc kubenswrapper[4706]: I1206 05:47:47.226799 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f7867b3a-9ee2-4c8e-a401-7181a5c4a9da-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f7867b3a-9ee2-4c8e-a401-7181a5c4a9da" (UID: "f7867b3a-9ee2-4c8e-a401-7181a5c4a9da"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:47 crc kubenswrapper[4706]: I1206 05:47:47.232754 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7867b3a-9ee2-4c8e-a401-7181a5c4a9da-kube-api-access-fdbb7" (OuterVolumeSpecName: "kube-api-access-fdbb7") pod "f7867b3a-9ee2-4c8e-a401-7181a5c4a9da" (UID: "f7867b3a-9ee2-4c8e-a401-7181a5c4a9da"). InnerVolumeSpecName "kube-api-access-fdbb7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:47 crc kubenswrapper[4706]: I1206 05:47:47.334454 4706 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f7867b3a-9ee2-4c8e-a401-7181a5c4a9da-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:47 crc kubenswrapper[4706]: I1206 05:47:47.334497 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fdbb7\" (UniqueName: \"kubernetes.io/projected/f7867b3a-9ee2-4c8e-a401-7181a5c4a9da-kube-api-access-fdbb7\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:47 crc kubenswrapper[4706]: I1206 05:47:47.609339 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"9823a9c2-7e13-4c23-a9ea-af6e03c32773","Type":"ContainerStarted","Data":"c2912dc58c0e66db6bc8c8bad4ee99e41aa75a24a6b4ea591fd843636ca92ac8"} Dec 06 05:47:47 crc kubenswrapper[4706]: I1206 05:47:47.636309 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.63629091 podStartE2EDuration="4.63629091s" podCreationTimestamp="2025-12-06 05:47:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:47.634717198 +0000 UTC m=+1689.962541142" watchObservedRunningTime="2025-12-06 05:47:47.63629091 +0000 UTC m=+1689.964114844" Dec 06 05:47:47 crc kubenswrapper[4706]: I1206 05:47:47.643410 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fdfe9ea0-e897-4071-9b1c-dcdd908b549d","Type":"ContainerStarted","Data":"af759f6f25b8ce012edcfbb5cde8d965be76a53e9370ed1f18034fa6c69c441c"} Dec 06 05:47:47 crc kubenswrapper[4706]: I1206 05:47:47.647893 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-external-api-0" event={"ID":"c50f78da-9727-4908-ba76-4a3dbc4455c7","Type":"ContainerStarted","Data":"ccb69b458a8ec90bd93e4d18acbde7c1da053f0326fe1565c2e29e3ceefa65c1"} Dec 06 05:47:47 crc kubenswrapper[4706]: I1206 05:47:47.651550 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-mpcv6" Dec 06 05:47:47 crc kubenswrapper[4706]: I1206 05:47:47.651750 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-mpcv6" event={"ID":"f7867b3a-9ee2-4c8e-a401-7181a5c4a9da","Type":"ContainerDied","Data":"717f9547432e46459a5dfab8d76728da1b89b012cd811e787cfdad8893de8740"} Dec 06 05:47:47 crc kubenswrapper[4706]: I1206 05:47:47.653323 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="717f9547432e46459a5dfab8d76728da1b89b012cd811e787cfdad8893de8740" Dec 06 05:47:47 crc kubenswrapper[4706]: I1206 05:47:47.652138 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jtjm9" podUID="05c17326-c953-41d3-97ea-d620f5535013" containerName="registry-server" containerID="cri-o://2beae595b6bfed7e68bbadce1618e970d6f47704636c0fa9ba8bffa9dc2a17cb" gracePeriod=2 Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.171289 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-e1a8-account-create-update-cms92" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.352722 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-28mqk\" (UniqueName: \"kubernetes.io/projected/c07df634-8325-4942-b8a1-7764cd036d1f-kube-api-access-28mqk\") pod \"c07df634-8325-4942-b8a1-7764cd036d1f\" (UID: \"c07df634-8325-4942-b8a1-7764cd036d1f\") " Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.352786 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c07df634-8325-4942-b8a1-7764cd036d1f-operator-scripts\") pod \"c07df634-8325-4942-b8a1-7764cd036d1f\" (UID: \"c07df634-8325-4942-b8a1-7764cd036d1f\") " Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.353507 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c07df634-8325-4942-b8a1-7764cd036d1f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c07df634-8325-4942-b8a1-7764cd036d1f" (UID: "c07df634-8325-4942-b8a1-7764cd036d1f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.360351 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c07df634-8325-4942-b8a1-7764cd036d1f-kube-api-access-28mqk" (OuterVolumeSpecName: "kube-api-access-28mqk") pod "c07df634-8325-4942-b8a1-7764cd036d1f" (UID: "c07df634-8325-4942-b8a1-7764cd036d1f"). InnerVolumeSpecName "kube-api-access-28mqk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.396601 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-c61d-account-create-update-n5zhs" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.413972 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-8xcns" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.428849 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-46nqn" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.441468 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-b649-account-create-update-htrrc" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.454970 4706 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c07df634-8325-4942-b8a1-7764cd036d1f-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.455010 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-28mqk\" (UniqueName: \"kubernetes.io/projected/c07df634-8325-4942-b8a1-7764cd036d1f-kube-api-access-28mqk\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.555831 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f16bb998-03b9-4bd9-93d4-9965fd119d32-operator-scripts\") pod \"f16bb998-03b9-4bd9-93d4-9965fd119d32\" (UID: \"f16bb998-03b9-4bd9-93d4-9965fd119d32\") " Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.555885 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aea1974a-8997-47fe-9c50-26387876a96a-operator-scripts\") pod \"aea1974a-8997-47fe-9c50-26387876a96a\" (UID: \"aea1974a-8997-47fe-9c50-26387876a96a\") " Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.555940 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvc9s\" (UniqueName: \"kubernetes.io/projected/956e4870-4475-4d8e-a0c2-0ffefcfcbb1f-kube-api-access-rvc9s\") pod \"956e4870-4475-4d8e-a0c2-0ffefcfcbb1f\" (UID: \"956e4870-4475-4d8e-a0c2-0ffefcfcbb1f\") " Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.555976 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8nhq\" (UniqueName: \"kubernetes.io/projected/aea1974a-8997-47fe-9c50-26387876a96a-kube-api-access-h8nhq\") pod \"aea1974a-8997-47fe-9c50-26387876a96a\" (UID: \"aea1974a-8997-47fe-9c50-26387876a96a\") " Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.556088 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kwjw4\" (UniqueName: \"kubernetes.io/projected/f16bb998-03b9-4bd9-93d4-9965fd119d32-kube-api-access-kwjw4\") pod \"f16bb998-03b9-4bd9-93d4-9965fd119d32\" (UID: \"f16bb998-03b9-4bd9-93d4-9965fd119d32\") " Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.556164 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/956e4870-4475-4d8e-a0c2-0ffefcfcbb1f-operator-scripts\") pod \"956e4870-4475-4d8e-a0c2-0ffefcfcbb1f\" (UID: \"956e4870-4475-4d8e-a0c2-0ffefcfcbb1f\") " Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.556214 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-px662\" (UniqueName: \"kubernetes.io/projected/38fec6d0-a4dc-45b4-a7fb-7a185ce174e4-kube-api-access-px662\") pod \"38fec6d0-a4dc-45b4-a7fb-7a185ce174e4\" (UID: \"38fec6d0-a4dc-45b4-a7fb-7a185ce174e4\") 
" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.556246 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38fec6d0-a4dc-45b4-a7fb-7a185ce174e4-operator-scripts\") pod \"38fec6d0-a4dc-45b4-a7fb-7a185ce174e4\" (UID: \"38fec6d0-a4dc-45b4-a7fb-7a185ce174e4\") " Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.556486 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aea1974a-8997-47fe-9c50-26387876a96a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "aea1974a-8997-47fe-9c50-26387876a96a" (UID: "aea1974a-8997-47fe-9c50-26387876a96a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.556786 4706 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/aea1974a-8997-47fe-9c50-26387876a96a-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.556814 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/956e4870-4475-4d8e-a0c2-0ffefcfcbb1f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "956e4870-4475-4d8e-a0c2-0ffefcfcbb1f" (UID: "956e4870-4475-4d8e-a0c2-0ffefcfcbb1f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.556895 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f16bb998-03b9-4bd9-93d4-9965fd119d32-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f16bb998-03b9-4bd9-93d4-9965fd119d32" (UID: "f16bb998-03b9-4bd9-93d4-9965fd119d32"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.557226 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38fec6d0-a4dc-45b4-a7fb-7a185ce174e4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "38fec6d0-a4dc-45b4-a7fb-7a185ce174e4" (UID: "38fec6d0-a4dc-45b4-a7fb-7a185ce174e4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.560190 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f16bb998-03b9-4bd9-93d4-9965fd119d32-kube-api-access-kwjw4" (OuterVolumeSpecName: "kube-api-access-kwjw4") pod "f16bb998-03b9-4bd9-93d4-9965fd119d32" (UID: "f16bb998-03b9-4bd9-93d4-9965fd119d32"). InnerVolumeSpecName "kube-api-access-kwjw4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.560298 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aea1974a-8997-47fe-9c50-26387876a96a-kube-api-access-h8nhq" (OuterVolumeSpecName: "kube-api-access-h8nhq") pod "aea1974a-8997-47fe-9c50-26387876a96a" (UID: "aea1974a-8997-47fe-9c50-26387876a96a"). InnerVolumeSpecName "kube-api-access-h8nhq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.566026 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/956e4870-4475-4d8e-a0c2-0ffefcfcbb1f-kube-api-access-rvc9s" (OuterVolumeSpecName: "kube-api-access-rvc9s") pod "956e4870-4475-4d8e-a0c2-0ffefcfcbb1f" (UID: "956e4870-4475-4d8e-a0c2-0ffefcfcbb1f"). InnerVolumeSpecName "kube-api-access-rvc9s". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.568233 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38fec6d0-a4dc-45b4-a7fb-7a185ce174e4-kube-api-access-px662" (OuterVolumeSpecName: "kube-api-access-px662") pod "38fec6d0-a4dc-45b4-a7fb-7a185ce174e4" (UID: "38fec6d0-a4dc-45b4-a7fb-7a185ce174e4"). InnerVolumeSpecName "kube-api-access-px662". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.658215 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvc9s\" (UniqueName: \"kubernetes.io/projected/956e4870-4475-4d8e-a0c2-0ffefcfcbb1f-kube-api-access-rvc9s\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.658249 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8nhq\" (UniqueName: \"kubernetes.io/projected/aea1974a-8997-47fe-9c50-26387876a96a-kube-api-access-h8nhq\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.658258 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kwjw4\" (UniqueName: \"kubernetes.io/projected/f16bb998-03b9-4bd9-93d4-9965fd119d32-kube-api-access-kwjw4\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.658267 4706 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/956e4870-4475-4d8e-a0c2-0ffefcfcbb1f-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.658277 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-px662\" (UniqueName: \"kubernetes.io/projected/38fec6d0-a4dc-45b4-a7fb-7a185ce174e4-kube-api-access-px662\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.658285 4706 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/38fec6d0-a4dc-45b4-a7fb-7a185ce174e4-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.658296 4706 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f16bb998-03b9-4bd9-93d4-9965fd119d32-operator-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.661388 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-b649-account-create-update-htrrc" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.661406 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-b649-account-create-update-htrrc" event={"ID":"956e4870-4475-4d8e-a0c2-0ffefcfcbb1f","Type":"ContainerDied","Data":"b1cef3752cca7bd5adfbf81ce99832da853345bbf39807e97a589227b0074e85"} Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.661444 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1cef3752cca7bd5adfbf81ce99832da853345bbf39807e97a589227b0074e85" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.663896 4706 generic.go:334] "Generic (PLEG): container finished" podID="05c17326-c953-41d3-97ea-d620f5535013" containerID="2beae595b6bfed7e68bbadce1618e970d6f47704636c0fa9ba8bffa9dc2a17cb" exitCode=0 Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.663960 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jtjm9" event={"ID":"05c17326-c953-41d3-97ea-d620f5535013","Type":"ContainerDied","Data":"2beae595b6bfed7e68bbadce1618e970d6f47704636c0fa9ba8bffa9dc2a17cb"} Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.668416 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-e1a8-account-create-update-cms92" event={"ID":"c07df634-8325-4942-b8a1-7764cd036d1f","Type":"ContainerDied","Data":"f9b7e5d75b2c7ac097e516286787364f9bf12f4d3ce099e401939c6908283664"} Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.668456 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f9b7e5d75b2c7ac097e516286787364f9bf12f4d3ce099e401939c6908283664" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.668420 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-e1a8-account-create-update-cms92" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.670562 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-8xcns" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.671708 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-8xcns" event={"ID":"38fec6d0-a4dc-45b4-a7fb-7a185ce174e4","Type":"ContainerDied","Data":"3992e5923d51091a4f45751a994fcd24e80a450b545f8309e1f3f4c6221c0151"} Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.671746 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3992e5923d51091a4f45751a994fcd24e80a450b545f8309e1f3f4c6221c0151" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.673676 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-c61d-account-create-update-n5zhs" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.673617 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-c61d-account-create-update-n5zhs" event={"ID":"aea1974a-8997-47fe-9c50-26387876a96a","Type":"ContainerDied","Data":"2c8ea62b180663b4966913e46d55e3ab4060405d9d078ef09d4b10e84e6bff44"} Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.674196 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c8ea62b180663b4966913e46d55e3ab4060405d9d078ef09d4b10e84e6bff44" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.679572 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-46nqn" Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.679610 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-46nqn" event={"ID":"f16bb998-03b9-4bd9-93d4-9965fd119d32","Type":"ContainerDied","Data":"bc8657f6bc16337a1a3e9375dbfc8419270ee6ad703c397abddd45233f6ca533"} Dec 06 05:47:48 crc kubenswrapper[4706]: I1206 05:47:48.679653 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bc8657f6bc16337a1a3e9375dbfc8419270ee6ad703c397abddd45233f6ca533" Dec 06 05:47:49 crc kubenswrapper[4706]: I1206 05:47:49.227489 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Dec 06 05:47:49 crc kubenswrapper[4706]: I1206 05:47:49.561065 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jtjm9" Dec 06 05:47:49 crc kubenswrapper[4706]: I1206 05:47:49.690711 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5z9x5\" (UniqueName: \"kubernetes.io/projected/05c17326-c953-41d3-97ea-d620f5535013-kube-api-access-5z9x5\") pod \"05c17326-c953-41d3-97ea-d620f5535013\" (UID: \"05c17326-c953-41d3-97ea-d620f5535013\") " Dec 06 05:47:49 crc kubenswrapper[4706]: I1206 05:47:49.690895 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05c17326-c953-41d3-97ea-d620f5535013-catalog-content\") pod \"05c17326-c953-41d3-97ea-d620f5535013\" (UID: \"05c17326-c953-41d3-97ea-d620f5535013\") " Dec 06 05:47:49 crc kubenswrapper[4706]: I1206 05:47:49.691020 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05c17326-c953-41d3-97ea-d620f5535013-utilities\") pod \"05c17326-c953-41d3-97ea-d620f5535013\" (UID: \"05c17326-c953-41d3-97ea-d620f5535013\") " Dec 06 05:47:49 crc kubenswrapper[4706]: I1206 05:47:49.691738 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05c17326-c953-41d3-97ea-d620f5535013-utilities" (OuterVolumeSpecName: "utilities") pod "05c17326-c953-41d3-97ea-d620f5535013" (UID: "05c17326-c953-41d3-97ea-d620f5535013"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:47:49 crc kubenswrapper[4706]: I1206 05:47:49.694202 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jtjm9" Dec 06 05:47:49 crc kubenswrapper[4706]: I1206 05:47:49.694198 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jtjm9" event={"ID":"05c17326-c953-41d3-97ea-d620f5535013","Type":"ContainerDied","Data":"f8b912a16b80fe34f5c18c2b4ac0170a427e9863e3e87cb08f570e8abecee370"} Dec 06 05:47:49 crc kubenswrapper[4706]: I1206 05:47:49.694264 4706 scope.go:117] "RemoveContainer" containerID="2beae595b6bfed7e68bbadce1618e970d6f47704636c0fa9ba8bffa9dc2a17cb" Dec 06 05:47:49 crc kubenswrapper[4706]: I1206 05:47:49.700726 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05c17326-c953-41d3-97ea-d620f5535013-kube-api-access-5z9x5" (OuterVolumeSpecName: "kube-api-access-5z9x5") pod "05c17326-c953-41d3-97ea-d620f5535013" (UID: "05c17326-c953-41d3-97ea-d620f5535013"). 
InnerVolumeSpecName "kube-api-access-5z9x5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:49 crc kubenswrapper[4706]: I1206 05:47:49.703006 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"c50f78da-9727-4908-ba76-4a3dbc4455c7","Type":"ContainerStarted","Data":"aadb81cf2a54a8953cb5aae63672bf295be7072ee38097e719eb3f1a77a1b215"} Dec 06 05:47:49 crc kubenswrapper[4706]: I1206 05:47:49.729165 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.7291413460000005 podStartE2EDuration="5.729141346s" podCreationTimestamp="2025-12-06 05:47:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:49.720233735 +0000 UTC m=+1692.048057679" watchObservedRunningTime="2025-12-06 05:47:49.729141346 +0000 UTC m=+1692.056965290" Dec 06 05:47:49 crc kubenswrapper[4706]: I1206 05:47:49.763926 4706 scope.go:117] "RemoveContainer" containerID="83a35db4459ebc38dc1591077e9cfd7dbc13ae5d74efb7d6802ddeb56fddc0f8" Dec 06 05:47:49 crc kubenswrapper[4706]: I1206 05:47:49.788798 4706 scope.go:117] "RemoveContainer" containerID="4712bc903ca987b517a63e4c7f2b58f2dc6d2496f0fab7da7dda1beb894316f5" Dec 06 05:47:49 crc kubenswrapper[4706]: I1206 05:47:49.793130 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5z9x5\" (UniqueName: \"kubernetes.io/projected/05c17326-c953-41d3-97ea-d620f5535013-kube-api-access-5z9x5\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:49 crc kubenswrapper[4706]: I1206 05:47:49.793153 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/05c17326-c953-41d3-97ea-d620f5535013-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:49 crc kubenswrapper[4706]: I1206 05:47:49.813796 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/05c17326-c953-41d3-97ea-d620f5535013-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "05c17326-c953-41d3-97ea-d620f5535013" (UID: "05c17326-c953-41d3-97ea-d620f5535013"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:47:49 crc kubenswrapper[4706]: I1206 05:47:49.897140 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/05c17326-c953-41d3-97ea-d620f5535013-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:50 crc kubenswrapper[4706]: I1206 05:47:50.030695 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jtjm9"] Dec 06 05:47:50 crc kubenswrapper[4706]: I1206 05:47:50.047838 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jtjm9"] Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.031861 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-7gj9p"] Dec 06 05:47:51 crc kubenswrapper[4706]: E1206 05:47:51.032597 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05c17326-c953-41d3-97ea-d620f5535013" containerName="extract-content" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.032613 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="05c17326-c953-41d3-97ea-d620f5535013" containerName="extract-content" Dec 06 05:47:51 crc kubenswrapper[4706]: E1206 05:47:51.032632 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="956e4870-4475-4d8e-a0c2-0ffefcfcbb1f" containerName="mariadb-account-create-update" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.032642 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="956e4870-4475-4d8e-a0c2-0ffefcfcbb1f" containerName="mariadb-account-create-update" Dec 06 05:47:51 crc kubenswrapper[4706]: E1206 05:47:51.032660 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38fec6d0-a4dc-45b4-a7fb-7a185ce174e4" containerName="mariadb-database-create" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.032668 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="38fec6d0-a4dc-45b4-a7fb-7a185ce174e4" containerName="mariadb-database-create" Dec 06 05:47:51 crc kubenswrapper[4706]: E1206 05:47:51.032676 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c07df634-8325-4942-b8a1-7764cd036d1f" containerName="mariadb-account-create-update" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.032684 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="c07df634-8325-4942-b8a1-7764cd036d1f" containerName="mariadb-account-create-update" Dec 06 05:47:51 crc kubenswrapper[4706]: E1206 05:47:51.032700 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aea1974a-8997-47fe-9c50-26387876a96a" containerName="mariadb-account-create-update" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.032707 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="aea1974a-8997-47fe-9c50-26387876a96a" containerName="mariadb-account-create-update" Dec 06 05:47:51 crc kubenswrapper[4706]: E1206 05:47:51.032722 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05c17326-c953-41d3-97ea-d620f5535013" containerName="extract-utilities" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.032729 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="05c17326-c953-41d3-97ea-d620f5535013" containerName="extract-utilities" Dec 06 05:47:51 crc kubenswrapper[4706]: E1206 05:47:51.032753 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05c17326-c953-41d3-97ea-d620f5535013" containerName="registry-server" Dec 06 05:47:51 crc 
kubenswrapper[4706]: I1206 05:47:51.032778 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="05c17326-c953-41d3-97ea-d620f5535013" containerName="registry-server" Dec 06 05:47:51 crc kubenswrapper[4706]: E1206 05:47:51.032796 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f16bb998-03b9-4bd9-93d4-9965fd119d32" containerName="mariadb-database-create" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.032806 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="f16bb998-03b9-4bd9-93d4-9965fd119d32" containerName="mariadb-database-create" Dec 06 05:47:51 crc kubenswrapper[4706]: E1206 05:47:51.032824 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7867b3a-9ee2-4c8e-a401-7181a5c4a9da" containerName="mariadb-database-create" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.032836 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7867b3a-9ee2-4c8e-a401-7181a5c4a9da" containerName="mariadb-database-create" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.033085 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="05c17326-c953-41d3-97ea-d620f5535013" containerName="registry-server" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.033105 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="c07df634-8325-4942-b8a1-7764cd036d1f" containerName="mariadb-account-create-update" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.033120 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="956e4870-4475-4d8e-a0c2-0ffefcfcbb1f" containerName="mariadb-account-create-update" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.033141 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7867b3a-9ee2-4c8e-a401-7181a5c4a9da" containerName="mariadb-database-create" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.033156 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="aea1974a-8997-47fe-9c50-26387876a96a" containerName="mariadb-account-create-update" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.033178 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="f16bb998-03b9-4bd9-93d4-9965fd119d32" containerName="mariadb-database-create" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.033193 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="38fec6d0-a4dc-45b4-a7fb-7a185ce174e4" containerName="mariadb-database-create" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.033840 4706 util.go:30] "No sandbox for pod can be found. 
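The SyncLoop ADD/UPDATE/DELETE/REMOVE entries above are the kubelet reacting to pod watch events from the API server. A minimal client-go sketch of such a watch; a real kubelet additionally filters the watch to pods scheduled to its own node, which this sketch omits:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Stream ADDED/MODIFIED/DELETED pod events for one namespace.
	w, err := clientset.CoreV1().Pods("openstack").Watch(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Printf("%s %T\n", ev.Type, ev.Object)
	}
}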
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-7gj9p" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.036796 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.036978 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.037470 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-xfwtm" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.051592 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-7gj9p"] Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.224606 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-config-data\") pod \"nova-cell0-conductor-db-sync-7gj9p\" (UID: \"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\") " pod="openstack/nova-cell0-conductor-db-sync-7gj9p" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.224685 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pm8gw\" (UniqueName: \"kubernetes.io/projected/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-kube-api-access-pm8gw\") pod \"nova-cell0-conductor-db-sync-7gj9p\" (UID: \"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\") " pod="openstack/nova-cell0-conductor-db-sync-7gj9p" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.224870 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-7gj9p\" (UID: \"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\") " pod="openstack/nova-cell0-conductor-db-sync-7gj9p" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.224931 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-scripts\") pod \"nova-cell0-conductor-db-sync-7gj9p\" (UID: \"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\") " pod="openstack/nova-cell0-conductor-db-sync-7gj9p" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.326523 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-7gj9p\" (UID: \"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\") " pod="openstack/nova-cell0-conductor-db-sync-7gj9p" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.326942 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-scripts\") pod \"nova-cell0-conductor-db-sync-7gj9p\" (UID: \"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\") " pod="openstack/nova-cell0-conductor-db-sync-7gj9p" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.327204 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-config-data\") pod \"nova-cell0-conductor-db-sync-7gj9p\" (UID: 
\"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\") " pod="openstack/nova-cell0-conductor-db-sync-7gj9p" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.327406 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pm8gw\" (UniqueName: \"kubernetes.io/projected/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-kube-api-access-pm8gw\") pod \"nova-cell0-conductor-db-sync-7gj9p\" (UID: \"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\") " pod="openstack/nova-cell0-conductor-db-sync-7gj9p" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.330917 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-scripts\") pod \"nova-cell0-conductor-db-sync-7gj9p\" (UID: \"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\") " pod="openstack/nova-cell0-conductor-db-sync-7gj9p" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.331532 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-7gj9p\" (UID: \"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\") " pod="openstack/nova-cell0-conductor-db-sync-7gj9p" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.331662 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-config-data\") pod \"nova-cell0-conductor-db-sync-7gj9p\" (UID: \"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\") " pod="openstack/nova-cell0-conductor-db-sync-7gj9p" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.352636 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pm8gw\" (UniqueName: \"kubernetes.io/projected/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-kube-api-access-pm8gw\") pod \"nova-cell0-conductor-db-sync-7gj9p\" (UID: \"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\") " pod="openstack/nova-cell0-conductor-db-sync-7gj9p" Dec 06 05:47:51 crc kubenswrapper[4706]: I1206 05:47:51.650636 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-7gj9p" Dec 06 05:47:52 crc kubenswrapper[4706]: I1206 05:47:52.045745 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05c17326-c953-41d3-97ea-d620f5535013" path="/var/lib/kubelet/pods/05c17326-c953-41d3-97ea-d620f5535013/volumes" Dec 06 05:47:52 crc kubenswrapper[4706]: I1206 05:47:52.095272 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-7gj9p"] Dec 06 05:47:52 crc kubenswrapper[4706]: I1206 05:47:52.732703 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-7gj9p" event={"ID":"1ff0bb4e-18a4-493e-a666-e94aa8bacea5","Type":"ContainerStarted","Data":"d502ccec2fb9a3bc05cd386f98c8a951c9f10d03d6d69bdb25317d69d588a4da"} Dec 06 05:47:52 crc kubenswrapper[4706]: I1206 05:47:52.901245 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-k8q9t"] Dec 06 05:47:52 crc kubenswrapper[4706]: I1206 05:47:52.926514 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k8q9t" Dec 06 05:47:52 crc kubenswrapper[4706]: I1206 05:47:52.958672 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k8q9t"] Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.059748 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b91a7da-0909-4a62-b283-5c1cb74b3f5d-utilities\") pod \"certified-operators-k8q9t\" (UID: \"4b91a7da-0909-4a62-b283-5c1cb74b3f5d\") " pod="openshift-marketplace/certified-operators-k8q9t" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.059823 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b91a7da-0909-4a62-b283-5c1cb74b3f5d-catalog-content\") pod \"certified-operators-k8q9t\" (UID: \"4b91a7da-0909-4a62-b283-5c1cb74b3f5d\") " pod="openshift-marketplace/certified-operators-k8q9t" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.059993 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnzpp\" (UniqueName: \"kubernetes.io/projected/4b91a7da-0909-4a62-b283-5c1cb74b3f5d-kube-api-access-gnzpp\") pod \"certified-operators-k8q9t\" (UID: \"4b91a7da-0909-4a62-b283-5c1cb74b3f5d\") " pod="openshift-marketplace/certified-operators-k8q9t" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.162123 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnzpp\" (UniqueName: \"kubernetes.io/projected/4b91a7da-0909-4a62-b283-5c1cb74b3f5d-kube-api-access-gnzpp\") pod \"certified-operators-k8q9t\" (UID: \"4b91a7da-0909-4a62-b283-5c1cb74b3f5d\") " pod="openshift-marketplace/certified-operators-k8q9t" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.162188 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b91a7da-0909-4a62-b283-5c1cb74b3f5d-utilities\") pod \"certified-operators-k8q9t\" (UID: \"4b91a7da-0909-4a62-b283-5c1cb74b3f5d\") " pod="openshift-marketplace/certified-operators-k8q9t" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.162220 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b91a7da-0909-4a62-b283-5c1cb74b3f5d-catalog-content\") pod \"certified-operators-k8q9t\" (UID: \"4b91a7da-0909-4a62-b283-5c1cb74b3f5d\") " pod="openshift-marketplace/certified-operators-k8q9t" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.162726 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b91a7da-0909-4a62-b283-5c1cb74b3f5d-catalog-content\") pod \"certified-operators-k8q9t\" (UID: \"4b91a7da-0909-4a62-b283-5c1cb74b3f5d\") " pod="openshift-marketplace/certified-operators-k8q9t" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.163283 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b91a7da-0909-4a62-b283-5c1cb74b3f5d-utilities\") pod \"certified-operators-k8q9t\" (UID: \"4b91a7da-0909-4a62-b283-5c1cb74b3f5d\") " pod="openshift-marketplace/certified-operators-k8q9t" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.195641 4706 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-gnzpp\" (UniqueName: \"kubernetes.io/projected/4b91a7da-0909-4a62-b283-5c1cb74b3f5d-kube-api-access-gnzpp\") pod \"certified-operators-k8q9t\" (UID: \"4b91a7da-0909-4a62-b283-5c1cb74b3f5d\") " pod="openshift-marketplace/certified-operators-k8q9t" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.330255 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-k8q9t" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.418217 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-565566dfbd-5h6dj" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.576547 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-combined-ca-bundle\") pod \"4e512237-f0a0-4312-900f-5f8cd066f34c\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.576627 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-ovndb-tls-certs\") pod \"4e512237-f0a0-4312-900f-5f8cd066f34c\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.576685 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4kcqx\" (UniqueName: \"kubernetes.io/projected/4e512237-f0a0-4312-900f-5f8cd066f34c-kube-api-access-4kcqx\") pod \"4e512237-f0a0-4312-900f-5f8cd066f34c\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.576738 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-config\") pod \"4e512237-f0a0-4312-900f-5f8cd066f34c\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.576813 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-httpd-config\") pod \"4e512237-f0a0-4312-900f-5f8cd066f34c\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.591107 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "4e512237-f0a0-4312-900f-5f8cd066f34c" (UID: "4e512237-f0a0-4312-900f-5f8cd066f34c"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.592651 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e512237-f0a0-4312-900f-5f8cd066f34c-kube-api-access-4kcqx" (OuterVolumeSpecName: "kube-api-access-4kcqx") pod "4e512237-f0a0-4312-900f-5f8cd066f34c" (UID: "4e512237-f0a0-4312-900f-5f8cd066f34c"). InnerVolumeSpecName "kube-api-access-4kcqx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.662415 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4e512237-f0a0-4312-900f-5f8cd066f34c" (UID: "4e512237-f0a0-4312-900f-5f8cd066f34c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.685755 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-config" (OuterVolumeSpecName: "config") pod "4e512237-f0a0-4312-900f-5f8cd066f34c" (UID: "4e512237-f0a0-4312-900f-5f8cd066f34c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.691207 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-config\") pod \"4e512237-f0a0-4312-900f-5f8cd066f34c\" (UID: \"4e512237-f0a0-4312-900f-5f8cd066f34c\") " Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.694309 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.694338 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4kcqx\" (UniqueName: \"kubernetes.io/projected/4e512237-f0a0-4312-900f-5f8cd066f34c-kube-api-access-4kcqx\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.694352 4706 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-httpd-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:53 crc kubenswrapper[4706]: W1206 05:47:53.694457 4706 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/4e512237-f0a0-4312-900f-5f8cd066f34c/volumes/kubernetes.io~secret/config Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.694470 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-config" (OuterVolumeSpecName: "config") pod "4e512237-f0a0-4312-900f-5f8cd066f34c" (UID: "4e512237-f0a0-4312-900f-5f8cd066f34c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.755176 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fdfe9ea0-e897-4071-9b1c-dcdd908b549d","Type":"ContainerStarted","Data":"7da90f08c84332cf43cf0c244fff04190b15c28dfdbadef1108aeb66c6f9ecf6"} Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.776273 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "4e512237-f0a0-4312-900f-5f8cd066f34c" (UID: "4e512237-f0a0-4312-900f-5f8cd066f34c"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.778882 4706 generic.go:334] "Generic (PLEG): container finished" podID="4e512237-f0a0-4312-900f-5f8cd066f34c" containerID="0fa82b6274e7874b77818c54894c836b4c1189bff7d2786933786e37292def0d" exitCode=0 Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.778963 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-565566dfbd-5h6dj" event={"ID":"4e512237-f0a0-4312-900f-5f8cd066f34c","Type":"ContainerDied","Data":"0fa82b6274e7874b77818c54894c836b4c1189bff7d2786933786e37292def0d"} Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.778997 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-565566dfbd-5h6dj" event={"ID":"4e512237-f0a0-4312-900f-5f8cd066f34c","Type":"ContainerDied","Data":"8ad46cf6cd5b3fc6b527b3397986530332a54cc9b1096d3c9fee077473030702"} Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.779017 4706 scope.go:117] "RemoveContainer" containerID="31d2454ebb8f95d79b52c6dcf0f9c935403cc845a8f5603a0da4e80479ae77e6" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.779189 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-565566dfbd-5h6dj" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.798248 4706 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.798290 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/4e512237-f0a0-4312-900f-5f8cd066f34c-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.806334 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="312c3de0-e931-4bf0-ae83-588410c22061" containerName="ceilometer-central-agent" containerID="cri-o://23a30859c2c49556a518ddbc641aeb0c4a216fb10e2b452546a9eec5ec4ec41f" gracePeriod=30 Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.806593 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"312c3de0-e931-4bf0-ae83-588410c22061","Type":"ContainerStarted","Data":"52ba5812c0230bedb201fff3f3ce8f91ed4aec34fbff919bb37af14da33e8521"} Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.806645 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.806923 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="312c3de0-e931-4bf0-ae83-588410c22061" containerName="proxy-httpd" containerID="cri-o://52ba5812c0230bedb201fff3f3ce8f91ed4aec34fbff919bb37af14da33e8521" gracePeriod=30 Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.806992 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="312c3de0-e931-4bf0-ae83-588410c22061" containerName="sg-core" containerID="cri-o://3d9dee6ddb4f2552cf77c605768981fec6ef094cc02937e23efa5652873032ce" gracePeriod=30 Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.807066 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="312c3de0-e931-4bf0-ae83-588410c22061" 
containerName="ceilometer-notification-agent" containerID="cri-o://522917d6dbf674c02bff7cb1487a46a8823b4691a92be658ffc70bdae0972afe" gracePeriod=30 Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.851498 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.447317294 podStartE2EDuration="22.851479045s" podCreationTimestamp="2025-12-06 05:47:31 +0000 UTC" firstStartedPulling="2025-12-06 05:47:32.477797843 +0000 UTC m=+1674.805621787" lastFinishedPulling="2025-12-06 05:47:52.881959594 +0000 UTC m=+1695.209783538" observedRunningTime="2025-12-06 05:47:53.841084504 +0000 UTC m=+1696.168908448" watchObservedRunningTime="2025-12-06 05:47:53.851479045 +0000 UTC m=+1696.179302989" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.852270 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=9.852261215 podStartE2EDuration="9.852261215s" podCreationTimestamp="2025-12-06 05:47:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:47:53.792230544 +0000 UTC m=+1696.120054488" watchObservedRunningTime="2025-12-06 05:47:53.852261215 +0000 UTC m=+1696.180085159" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.890155 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-565566dfbd-5h6dj"] Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.896966 4706 scope.go:117] "RemoveContainer" containerID="0fa82b6274e7874b77818c54894c836b4c1189bff7d2786933786e37292def0d" Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.905135 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-565566dfbd-5h6dj"] Dec 06 05:47:53 crc kubenswrapper[4706]: I1206 05:47:53.913562 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-k8q9t"] Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.051502 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e512237-f0a0-4312-900f-5f8cd066f34c" path="/var/lib/kubelet/pods/4e512237-f0a0-4312-900f-5f8cd066f34c/volumes" Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.087385 4706 scope.go:117] "RemoveContainer" containerID="31d2454ebb8f95d79b52c6dcf0f9c935403cc845a8f5603a0da4e80479ae77e6" Dec 06 05:47:54 crc kubenswrapper[4706]: E1206 05:47:54.088463 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31d2454ebb8f95d79b52c6dcf0f9c935403cc845a8f5603a0da4e80479ae77e6\": container with ID starting with 31d2454ebb8f95d79b52c6dcf0f9c935403cc845a8f5603a0da4e80479ae77e6 not found: ID does not exist" containerID="31d2454ebb8f95d79b52c6dcf0f9c935403cc845a8f5603a0da4e80479ae77e6" Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.088497 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31d2454ebb8f95d79b52c6dcf0f9c935403cc845a8f5603a0da4e80479ae77e6"} err="failed to get container status \"31d2454ebb8f95d79b52c6dcf0f9c935403cc845a8f5603a0da4e80479ae77e6\": rpc error: code = NotFound desc = could not find container \"31d2454ebb8f95d79b52c6dcf0f9c935403cc845a8f5603a0da4e80479ae77e6\": container with ID starting with 31d2454ebb8f95d79b52c6dcf0f9c935403cc845a8f5603a0da4e80479ae77e6 not found: ID does not exist" Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.088526 
4706 scope.go:117] "RemoveContainer" containerID="0fa82b6274e7874b77818c54894c836b4c1189bff7d2786933786e37292def0d" Dec 06 05:47:54 crc kubenswrapper[4706]: E1206 05:47:54.089709 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0fa82b6274e7874b77818c54894c836b4c1189bff7d2786933786e37292def0d\": container with ID starting with 0fa82b6274e7874b77818c54894c836b4c1189bff7d2786933786e37292def0d not found: ID does not exist" containerID="0fa82b6274e7874b77818c54894c836b4c1189bff7d2786933786e37292def0d" Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.089770 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0fa82b6274e7874b77818c54894c836b4c1189bff7d2786933786e37292def0d"} err="failed to get container status \"0fa82b6274e7874b77818c54894c836b4c1189bff7d2786933786e37292def0d\": rpc error: code = NotFound desc = could not find container \"0fa82b6274e7874b77818c54894c836b4c1189bff7d2786933786e37292def0d\": container with ID starting with 0fa82b6274e7874b77818c54894c836b4c1189bff7d2786933786e37292def0d not found: ID does not exist" Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.453099 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.726509 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.726578 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.763987 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.788267 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.823804 4706 generic.go:334] "Generic (PLEG): container finished" podID="312c3de0-e931-4bf0-ae83-588410c22061" containerID="52ba5812c0230bedb201fff3f3ce8f91ed4aec34fbff919bb37af14da33e8521" exitCode=0 Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.823833 4706 generic.go:334] "Generic (PLEG): container finished" podID="312c3de0-e931-4bf0-ae83-588410c22061" containerID="3d9dee6ddb4f2552cf77c605768981fec6ef094cc02937e23efa5652873032ce" exitCode=2 Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.823840 4706 generic.go:334] "Generic (PLEG): container finished" podID="312c3de0-e931-4bf0-ae83-588410c22061" containerID="522917d6dbf674c02bff7cb1487a46a8823b4691a92be658ffc70bdae0972afe" exitCode=0 Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.823846 4706 generic.go:334] "Generic (PLEG): container finished" podID="312c3de0-e931-4bf0-ae83-588410c22061" containerID="23a30859c2c49556a518ddbc641aeb0c4a216fb10e2b452546a9eec5ec4ec41f" exitCode=0 Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.823888 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"312c3de0-e931-4bf0-ae83-588410c22061","Type":"ContainerDied","Data":"52ba5812c0230bedb201fff3f3ce8f91ed4aec34fbff919bb37af14da33e8521"} Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.823913 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"312c3de0-e931-4bf0-ae83-588410c22061","Type":"ContainerDied","Data":"3d9dee6ddb4f2552cf77c605768981fec6ef094cc02937e23efa5652873032ce"} Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.823924 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"312c3de0-e931-4bf0-ae83-588410c22061","Type":"ContainerDied","Data":"522917d6dbf674c02bff7cb1487a46a8823b4691a92be658ffc70bdae0972afe"} Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.823933 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"312c3de0-e931-4bf0-ae83-588410c22061","Type":"ContainerDied","Data":"23a30859c2c49556a518ddbc641aeb0c4a216fb10e2b452546a9eec5ec4ec41f"} Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.829407 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k8q9t" event={"ID":"4b91a7da-0909-4a62-b283-5c1cb74b3f5d","Type":"ContainerStarted","Data":"9c630b1cbb0551dc197ac120dbd3f9f8a443366146dd501cb355df1867c9de9b"} Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.829447 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k8q9t" event={"ID":"4b91a7da-0909-4a62-b283-5c1cb74b3f5d","Type":"ContainerStarted","Data":"77346e6f4bb438b4a0b76436a27ae8923517fb4ef696ea66fb63a3a2698293e7"} Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.830997 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.831022 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.838673 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.838718 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.873734 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 06 05:47:54 crc kubenswrapper[4706]: I1206 05:47:54.879398 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Dec 06 05:47:55 crc kubenswrapper[4706]: I1206 05:47:55.842786 4706 generic.go:334] "Generic (PLEG): container finished" podID="4b91a7da-0909-4a62-b283-5c1cb74b3f5d" containerID="9c630b1cbb0551dc197ac120dbd3f9f8a443366146dd501cb355df1867c9de9b" exitCode=0 Dec 06 05:47:55 crc kubenswrapper[4706]: I1206 05:47:55.842874 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k8q9t" event={"ID":"4b91a7da-0909-4a62-b283-5c1cb74b3f5d","Type":"ContainerDied","Data":"9c630b1cbb0551dc197ac120dbd3f9f8a443366146dd501cb355df1867c9de9b"} Dec 06 05:47:55 crc kubenswrapper[4706]: I1206 05:47:55.843757 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 06 05:47:55 crc kubenswrapper[4706]: I1206 05:47:55.843986 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.035418 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.141070 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-sg-core-conf-yaml\") pod \"312c3de0-e931-4bf0-ae83-588410c22061\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.141231 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-combined-ca-bundle\") pod \"312c3de0-e931-4bf0-ae83-588410c22061\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.142012 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-krpdt\" (UniqueName: \"kubernetes.io/projected/312c3de0-e931-4bf0-ae83-588410c22061-kube-api-access-krpdt\") pod \"312c3de0-e931-4bf0-ae83-588410c22061\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.142060 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-config-data\") pod \"312c3de0-e931-4bf0-ae83-588410c22061\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.142089 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/312c3de0-e931-4bf0-ae83-588410c22061-log-httpd\") pod \"312c3de0-e931-4bf0-ae83-588410c22061\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.142163 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-scripts\") pod \"312c3de0-e931-4bf0-ae83-588410c22061\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.142198 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/312c3de0-e931-4bf0-ae83-588410c22061-run-httpd\") pod \"312c3de0-e931-4bf0-ae83-588410c22061\" (UID: \"312c3de0-e931-4bf0-ae83-588410c22061\") " Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.143445 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/312c3de0-e931-4bf0-ae83-588410c22061-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "312c3de0-e931-4bf0-ae83-588410c22061" (UID: "312c3de0-e931-4bf0-ae83-588410c22061"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.143773 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/312c3de0-e931-4bf0-ae83-588410c22061-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "312c3de0-e931-4bf0-ae83-588410c22061" (UID: "312c3de0-e931-4bf0-ae83-588410c22061"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.149212 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-scripts" (OuterVolumeSpecName: "scripts") pod "312c3de0-e931-4bf0-ae83-588410c22061" (UID: "312c3de0-e931-4bf0-ae83-588410c22061"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.168544 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/312c3de0-e931-4bf0-ae83-588410c22061-kube-api-access-krpdt" (OuterVolumeSpecName: "kube-api-access-krpdt") pod "312c3de0-e931-4bf0-ae83-588410c22061" (UID: "312c3de0-e931-4bf0-ae83-588410c22061"). InnerVolumeSpecName "kube-api-access-krpdt". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.179206 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "312c3de0-e931-4bf0-ae83-588410c22061" (UID: "312c3de0-e931-4bf0-ae83-588410c22061"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.237254 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "312c3de0-e931-4bf0-ae83-588410c22061" (UID: "312c3de0-e931-4bf0-ae83-588410c22061"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.244935 4706 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/312c3de0-e931-4bf0-ae83-588410c22061-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.245291 4706 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.245434 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.245570 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-krpdt\" (UniqueName: \"kubernetes.io/projected/312c3de0-e931-4bf0-ae83-588410c22061-kube-api-access-krpdt\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.245677 4706 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/312c3de0-e931-4bf0-ae83-588410c22061-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.245842 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.267301 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-config-data" (OuterVolumeSpecName: "config-data") pod "312c3de0-e931-4bf0-ae83-588410c22061" (UID: "312c3de0-e931-4bf0-ae83-588410c22061"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.347273 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/312c3de0-e931-4bf0-ae83-588410c22061-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.869797 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.871160 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"312c3de0-e931-4bf0-ae83-588410c22061","Type":"ContainerDied","Data":"d5756a09975524bdcb1140ffda48c7b66e0452c3d4f203e938f86278270ecaca"} Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.871210 4706 scope.go:117] "RemoveContainer" containerID="52ba5812c0230bedb201fff3f3ce8f91ed4aec34fbff919bb37af14da33e8521" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.907211 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.921120 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.927765 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:47:56 crc kubenswrapper[4706]: E1206 05:47:56.928193 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e512237-f0a0-4312-900f-5f8cd066f34c" containerName="neutron-api" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.928212 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e512237-f0a0-4312-900f-5f8cd066f34c" containerName="neutron-api" Dec 06 05:47:56 crc kubenswrapper[4706]: E1206 05:47:56.928240 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="312c3de0-e931-4bf0-ae83-588410c22061" containerName="ceilometer-central-agent" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.928247 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="312c3de0-e931-4bf0-ae83-588410c22061" containerName="ceilometer-central-agent" Dec 06 05:47:56 crc kubenswrapper[4706]: E1206 05:47:56.928266 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e512237-f0a0-4312-900f-5f8cd066f34c" containerName="neutron-httpd" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.928272 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e512237-f0a0-4312-900f-5f8cd066f34c" containerName="neutron-httpd" Dec 06 05:47:56 crc kubenswrapper[4706]: E1206 05:47:56.928282 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="312c3de0-e931-4bf0-ae83-588410c22061" containerName="sg-core" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.928288 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="312c3de0-e931-4bf0-ae83-588410c22061" containerName="sg-core" Dec 06 05:47:56 crc kubenswrapper[4706]: E1206 05:47:56.928303 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="312c3de0-e931-4bf0-ae83-588410c22061" containerName="proxy-httpd" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.928312 4706 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="312c3de0-e931-4bf0-ae83-588410c22061" containerName="proxy-httpd" Dec 06 05:47:56 crc kubenswrapper[4706]: E1206 05:47:56.928327 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="312c3de0-e931-4bf0-ae83-588410c22061" containerName="ceilometer-notification-agent" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.928335 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="312c3de0-e931-4bf0-ae83-588410c22061" containerName="ceilometer-notification-agent" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.928507 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="312c3de0-e931-4bf0-ae83-588410c22061" containerName="sg-core" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.928525 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="312c3de0-e931-4bf0-ae83-588410c22061" containerName="ceilometer-central-agent" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.928538 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e512237-f0a0-4312-900f-5f8cd066f34c" containerName="neutron-httpd" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.928550 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e512237-f0a0-4312-900f-5f8cd066f34c" containerName="neutron-api" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.928561 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="312c3de0-e931-4bf0-ae83-588410c22061" containerName="ceilometer-notification-agent" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.928569 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="312c3de0-e931-4bf0-ae83-588410c22061" containerName="proxy-httpd" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.930237 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.938943 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.939451 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.947565 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.962607 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-run-httpd\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.962855 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-config-data\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.963219 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.963432 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cc69x\" (UniqueName: \"kubernetes.io/projected/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-kube-api-access-cc69x\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.963511 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.963534 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-scripts\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:56 crc kubenswrapper[4706]: I1206 05:47:56.963561 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-log-httpd\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:57 crc kubenswrapper[4706]: I1206 05:47:57.067571 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-run-httpd\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:57 crc kubenswrapper[4706]: I1206 05:47:57.070750 4706 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-run-httpd\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:57 crc kubenswrapper[4706]: I1206 05:47:57.070907 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-config-data\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:57 crc kubenswrapper[4706]: I1206 05:47:57.071079 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:57 crc kubenswrapper[4706]: I1206 05:47:57.071183 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cc69x\" (UniqueName: \"kubernetes.io/projected/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-kube-api-access-cc69x\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:57 crc kubenswrapper[4706]: I1206 05:47:57.071244 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:57 crc kubenswrapper[4706]: I1206 05:47:57.071288 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-scripts\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:57 crc kubenswrapper[4706]: I1206 05:47:57.071313 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-log-httpd\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:57 crc kubenswrapper[4706]: I1206 05:47:57.073121 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-log-httpd\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:57 crc kubenswrapper[4706]: I1206 05:47:57.079576 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-scripts\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:57 crc kubenswrapper[4706]: I1206 05:47:57.080783 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:57 crc kubenswrapper[4706]: I1206 05:47:57.081856 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:57 crc kubenswrapper[4706]: I1206 05:47:57.081977 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-config-data\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:57 crc kubenswrapper[4706]: I1206 05:47:57.095933 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cc69x\" (UniqueName: \"kubernetes.io/projected/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-kube-api-access-cc69x\") pod \"ceilometer-0\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " pod="openstack/ceilometer-0" Dec 06 05:47:57 crc kubenswrapper[4706]: I1206 05:47:57.302756 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:47:57 crc kubenswrapper[4706]: I1206 05:47:57.948586 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 06 05:47:57 crc kubenswrapper[4706]: I1206 05:47:57.948692 4706 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Dec 06 05:47:57 crc kubenswrapper[4706]: I1206 05:47:57.962293 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Dec 06 05:47:57 crc kubenswrapper[4706]: I1206 05:47:57.993820 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 06 05:47:58 crc kubenswrapper[4706]: I1206 05:47:58.087850 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="312c3de0-e931-4bf0-ae83-588410c22061" path="/var/lib/kubelet/pods/312c3de0-e931-4bf0-ae83-588410c22061/volumes" Dec 06 05:47:58 crc kubenswrapper[4706]: I1206 05:47:58.095219 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Dec 06 05:47:58 crc kubenswrapper[4706]: I1206 05:47:58.595496 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:48:02 crc kubenswrapper[4706]: I1206 05:48:02.057834 4706 scope.go:117] "RemoveContainer" containerID="3d9dee6ddb4f2552cf77c605768981fec6ef094cc02937e23efa5652873032ce" Dec 06 05:48:02 crc kubenswrapper[4706]: I1206 05:48:02.125408 4706 scope.go:117] "RemoveContainer" containerID="522917d6dbf674c02bff7cb1487a46a8823b4691a92be658ffc70bdae0972afe" Dec 06 05:48:02 crc kubenswrapper[4706]: I1206 05:48:02.301001 4706 scope.go:117] "RemoveContainer" containerID="23a30859c2c49556a518ddbc641aeb0c4a216fb10e2b452546a9eec5ec4ec41f" Dec 06 05:48:02 crc kubenswrapper[4706]: W1206 05:48:02.573336 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c944d1e_e377_4c7c_9ffc_8deb9d1f271a.slice/crio-71639dd5eb750978e06c25b77a05f413beb00d9295c23cfd33631cc8944b244f WatchSource:0}: Error finding container 71639dd5eb750978e06c25b77a05f413beb00d9295c23cfd33631cc8944b244f: Status 404 returned error can't find the container with id 71639dd5eb750978e06c25b77a05f413beb00d9295c23cfd33631cc8944b244f Dec 06 05:48:02 crc kubenswrapper[4706]: I1206 05:48:02.573976 4706 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openstack/ceilometer-0"] Dec 06 05:48:02 crc kubenswrapper[4706]: I1206 05:48:02.929879 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a","Type":"ContainerStarted","Data":"71639dd5eb750978e06c25b77a05f413beb00d9295c23cfd33631cc8944b244f"} Dec 06 05:48:02 crc kubenswrapper[4706]: I1206 05:48:02.932040 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-7gj9p" event={"ID":"1ff0bb4e-18a4-493e-a666-e94aa8bacea5","Type":"ContainerStarted","Data":"be2d46dba7aeff67ac6593d4426b40cbcef335e71dab6335d7a1824b22b0c3e3"} Dec 06 05:48:02 crc kubenswrapper[4706]: I1206 05:48:02.934513 4706 generic.go:334] "Generic (PLEG): container finished" podID="4b91a7da-0909-4a62-b283-5c1cb74b3f5d" containerID="edbb472549000161f567d07bb65ec8a55142a1969b9936996429152ede2bc86f" exitCode=0 Dec 06 05:48:02 crc kubenswrapper[4706]: I1206 05:48:02.934561 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k8q9t" event={"ID":"4b91a7da-0909-4a62-b283-5c1cb74b3f5d","Type":"ContainerDied","Data":"edbb472549000161f567d07bb65ec8a55142a1969b9936996429152ede2bc86f"} Dec 06 05:48:02 crc kubenswrapper[4706]: I1206 05:48:02.955019 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-7gj9p" podStartSLOduration=1.923656131 podStartE2EDuration="11.954996504s" podCreationTimestamp="2025-12-06 05:47:51 +0000 UTC" firstStartedPulling="2025-12-06 05:47:52.103578018 +0000 UTC m=+1694.431401962" lastFinishedPulling="2025-12-06 05:48:02.134918391 +0000 UTC m=+1704.462742335" observedRunningTime="2025-12-06 05:48:02.951165971 +0000 UTC m=+1705.278989915" watchObservedRunningTime="2025-12-06 05:48:02.954996504 +0000 UTC m=+1705.282820448" Dec 06 05:48:05 crc kubenswrapper[4706]: I1206 05:48:05.960927 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:48:05 crc kubenswrapper[4706]: I1206 05:48:05.961527 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:48:05 crc kubenswrapper[4706]: I1206 05:48:05.961564 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:48:05 crc kubenswrapper[4706]: I1206 05:48:05.962103 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a","Type":"ContainerStarted","Data":"1d9f04c2e00e3d9baf5d449142649a3ea1451e369225aa51d884d5dc3866af3b"} Dec 06 05:48:05 crc kubenswrapper[4706]: I1206 05:48:05.962329 4706 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a"} pod="openshift-machine-config-operator/machine-config-daemon-z27rn" containerMessage="Container machine-config-daemon failed liveness probe, will be 
restarted" Dec 06 05:48:05 crc kubenswrapper[4706]: I1206 05:48:05.962382 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" containerID="cri-o://6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" gracePeriod=600 Dec 06 05:48:06 crc kubenswrapper[4706]: E1206 05:48:06.097064 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:48:06 crc kubenswrapper[4706]: I1206 05:48:06.973757 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k8q9t" event={"ID":"4b91a7da-0909-4a62-b283-5c1cb74b3f5d","Type":"ContainerStarted","Data":"e8f589bb200884dbbb3a08272a51d1ca0ea82b022c7318b8a598c989fb90d683"} Dec 06 05:48:06 crc kubenswrapper[4706]: I1206 05:48:06.993068 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-k8q9t" podStartSLOduration=4.847638001 podStartE2EDuration="14.993031516s" podCreationTimestamp="2025-12-06 05:47:52 +0000 UTC" firstStartedPulling="2025-12-06 05:47:55.847276468 +0000 UTC m=+1698.175100412" lastFinishedPulling="2025-12-06 05:48:05.992669983 +0000 UTC m=+1708.320493927" observedRunningTime="2025-12-06 05:48:06.990990011 +0000 UTC m=+1709.318813975" watchObservedRunningTime="2025-12-06 05:48:06.993031516 +0000 UTC m=+1709.320855460" Dec 06 05:48:07 crc kubenswrapper[4706]: I1206 05:48:07.022690 4706 generic.go:334] "Generic (PLEG): container finished" podID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" exitCode=0 Dec 06 05:48:07 crc kubenswrapper[4706]: I1206 05:48:07.022808 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerDied","Data":"6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a"} Dec 06 05:48:07 crc kubenswrapper[4706]: I1206 05:48:07.023678 4706 scope.go:117] "RemoveContainer" containerID="71fb78259889c3e53f18a29621b104746019c251e6090d6297b3d1c61fdcf223" Dec 06 05:48:07 crc kubenswrapper[4706]: I1206 05:48:07.024785 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:48:07 crc kubenswrapper[4706]: E1206 05:48:07.025080 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:48:07 crc kubenswrapper[4706]: I1206 05:48:07.026551 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a","Type":"ContainerStarted","Data":"43e441eaeec72eca1e27c09f1a5ac0f91f546a7d2b8bf870a4f979a2f8f478eb"} Dec 06 05:48:11 crc kubenswrapper[4706]: I1206 05:48:11.093414 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a","Type":"ContainerStarted","Data":"fc438c1010396ca7e81ae152c401c2a02f0d90d904c513173615633a351601b2"} Dec 06 05:48:13 crc kubenswrapper[4706]: I1206 05:48:13.117791 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a","Type":"ContainerStarted","Data":"a81c3d69450aa91c8fe0be6b92fe67d9c81486bb34b3de3a7d07bc10cbe5d78c"} Dec 06 05:48:13 crc kubenswrapper[4706]: I1206 05:48:13.117920 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerName="ceilometer-central-agent" containerID="cri-o://1d9f04c2e00e3d9baf5d449142649a3ea1451e369225aa51d884d5dc3866af3b" gracePeriod=30 Dec 06 05:48:13 crc kubenswrapper[4706]: I1206 05:48:13.118194 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 06 05:48:13 crc kubenswrapper[4706]: I1206 05:48:13.118227 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerName="proxy-httpd" containerID="cri-o://a81c3d69450aa91c8fe0be6b92fe67d9c81486bb34b3de3a7d07bc10cbe5d78c" gracePeriod=30 Dec 06 05:48:13 crc kubenswrapper[4706]: I1206 05:48:13.118307 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerName="sg-core" containerID="cri-o://fc438c1010396ca7e81ae152c401c2a02f0d90d904c513173615633a351601b2" gracePeriod=30 Dec 06 05:48:13 crc kubenswrapper[4706]: I1206 05:48:13.118306 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerName="ceilometer-notification-agent" containerID="cri-o://43e441eaeec72eca1e27c09f1a5ac0f91f546a7d2b8bf870a4f979a2f8f478eb" gracePeriod=30 Dec 06 05:48:13 crc kubenswrapper[4706]: I1206 05:48:13.140702 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=7.640220855 podStartE2EDuration="17.140686767s" podCreationTimestamp="2025-12-06 05:47:56 +0000 UTC" firstStartedPulling="2025-12-06 05:48:02.575694688 +0000 UTC m=+1704.903518632" lastFinishedPulling="2025-12-06 05:48:12.0761606 +0000 UTC m=+1714.403984544" observedRunningTime="2025-12-06 05:48:13.139193467 +0000 UTC m=+1715.467017421" watchObservedRunningTime="2025-12-06 05:48:13.140686767 +0000 UTC m=+1715.468510711" Dec 06 05:48:13 crc kubenswrapper[4706]: I1206 05:48:13.331405 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-k8q9t" Dec 06 05:48:13 crc kubenswrapper[4706]: I1206 05:48:13.331755 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-k8q9t" Dec 06 05:48:13 crc kubenswrapper[4706]: I1206 05:48:13.397861 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-k8q9t" Dec 06 05:48:14 crc kubenswrapper[4706]: I1206 05:48:14.126905 
4706 generic.go:334] "Generic (PLEG): container finished" podID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerID="a81c3d69450aa91c8fe0be6b92fe67d9c81486bb34b3de3a7d07bc10cbe5d78c" exitCode=0 Dec 06 05:48:14 crc kubenswrapper[4706]: I1206 05:48:14.126937 4706 generic.go:334] "Generic (PLEG): container finished" podID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerID="fc438c1010396ca7e81ae152c401c2a02f0d90d904c513173615633a351601b2" exitCode=2 Dec 06 05:48:14 crc kubenswrapper[4706]: I1206 05:48:14.126945 4706 generic.go:334] "Generic (PLEG): container finished" podID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerID="43e441eaeec72eca1e27c09f1a5ac0f91f546a7d2b8bf870a4f979a2f8f478eb" exitCode=0 Dec 06 05:48:14 crc kubenswrapper[4706]: I1206 05:48:14.126940 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a","Type":"ContainerDied","Data":"a81c3d69450aa91c8fe0be6b92fe67d9c81486bb34b3de3a7d07bc10cbe5d78c"} Dec 06 05:48:14 crc kubenswrapper[4706]: I1206 05:48:14.126974 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a","Type":"ContainerDied","Data":"fc438c1010396ca7e81ae152c401c2a02f0d90d904c513173615633a351601b2"} Dec 06 05:48:14 crc kubenswrapper[4706]: I1206 05:48:14.126989 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a","Type":"ContainerDied","Data":"43e441eaeec72eca1e27c09f1a5ac0f91f546a7d2b8bf870a4f979a2f8f478eb"} Dec 06 05:48:14 crc kubenswrapper[4706]: I1206 05:48:14.170643 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-k8q9t" Dec 06 05:48:14 crc kubenswrapper[4706]: I1206 05:48:14.229000 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k8q9t"] Dec 06 05:48:16 crc kubenswrapper[4706]: I1206 05:48:16.145422 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-k8q9t" podUID="4b91a7da-0909-4a62-b283-5c1cb74b3f5d" containerName="registry-server" containerID="cri-o://e8f589bb200884dbbb3a08272a51d1ca0ea82b022c7318b8a598c989fb90d683" gracePeriod=2 Dec 06 05:48:16 crc kubenswrapper[4706]: I1206 05:48:16.684853 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k8q9t" Dec 06 05:48:16 crc kubenswrapper[4706]: I1206 05:48:16.854764 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b91a7da-0909-4a62-b283-5c1cb74b3f5d-catalog-content\") pod \"4b91a7da-0909-4a62-b283-5c1cb74b3f5d\" (UID: \"4b91a7da-0909-4a62-b283-5c1cb74b3f5d\") " Dec 06 05:48:16 crc kubenswrapper[4706]: I1206 05:48:16.854966 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b91a7da-0909-4a62-b283-5c1cb74b3f5d-utilities\") pod \"4b91a7da-0909-4a62-b283-5c1cb74b3f5d\" (UID: \"4b91a7da-0909-4a62-b283-5c1cb74b3f5d\") " Dec 06 05:48:16 crc kubenswrapper[4706]: I1206 05:48:16.855040 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnzpp\" (UniqueName: \"kubernetes.io/projected/4b91a7da-0909-4a62-b283-5c1cb74b3f5d-kube-api-access-gnzpp\") pod \"4b91a7da-0909-4a62-b283-5c1cb74b3f5d\" (UID: \"4b91a7da-0909-4a62-b283-5c1cb74b3f5d\") " Dec 06 05:48:16 crc kubenswrapper[4706]: I1206 05:48:16.855527 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b91a7da-0909-4a62-b283-5c1cb74b3f5d-utilities" (OuterVolumeSpecName: "utilities") pod "4b91a7da-0909-4a62-b283-5c1cb74b3f5d" (UID: "4b91a7da-0909-4a62-b283-5c1cb74b3f5d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:48:16 crc kubenswrapper[4706]: I1206 05:48:16.861615 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b91a7da-0909-4a62-b283-5c1cb74b3f5d-kube-api-access-gnzpp" (OuterVolumeSpecName: "kube-api-access-gnzpp") pod "4b91a7da-0909-4a62-b283-5c1cb74b3f5d" (UID: "4b91a7da-0909-4a62-b283-5c1cb74b3f5d"). InnerVolumeSpecName "kube-api-access-gnzpp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:48:16 crc kubenswrapper[4706]: I1206 05:48:16.905949 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b91a7da-0909-4a62-b283-5c1cb74b3f5d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4b91a7da-0909-4a62-b283-5c1cb74b3f5d" (UID: "4b91a7da-0909-4a62-b283-5c1cb74b3f5d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:48:16 crc kubenswrapper[4706]: I1206 05:48:16.957187 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnzpp\" (UniqueName: \"kubernetes.io/projected/4b91a7da-0909-4a62-b283-5c1cb74b3f5d-kube-api-access-gnzpp\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:16 crc kubenswrapper[4706]: I1206 05:48:16.957224 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b91a7da-0909-4a62-b283-5c1cb74b3f5d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:16 crc kubenswrapper[4706]: I1206 05:48:16.957237 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b91a7da-0909-4a62-b283-5c1cb74b3f5d-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:17 crc kubenswrapper[4706]: I1206 05:48:17.158005 4706 generic.go:334] "Generic (PLEG): container finished" podID="4b91a7da-0909-4a62-b283-5c1cb74b3f5d" containerID="e8f589bb200884dbbb3a08272a51d1ca0ea82b022c7318b8a598c989fb90d683" exitCode=0 Dec 06 05:48:17 crc kubenswrapper[4706]: I1206 05:48:17.158081 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k8q9t" event={"ID":"4b91a7da-0909-4a62-b283-5c1cb74b3f5d","Type":"ContainerDied","Data":"e8f589bb200884dbbb3a08272a51d1ca0ea82b022c7318b8a598c989fb90d683"} Dec 06 05:48:17 crc kubenswrapper[4706]: I1206 05:48:17.158129 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-k8q9t" event={"ID":"4b91a7da-0909-4a62-b283-5c1cb74b3f5d","Type":"ContainerDied","Data":"77346e6f4bb438b4a0b76436a27ae8923517fb4ef696ea66fb63a3a2698293e7"} Dec 06 05:48:17 crc kubenswrapper[4706]: I1206 05:48:17.158140 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-k8q9t" Dec 06 05:48:17 crc kubenswrapper[4706]: I1206 05:48:17.158149 4706 scope.go:117] "RemoveContainer" containerID="e8f589bb200884dbbb3a08272a51d1ca0ea82b022c7318b8a598c989fb90d683" Dec 06 05:48:17 crc kubenswrapper[4706]: I1206 05:48:17.187967 4706 scope.go:117] "RemoveContainer" containerID="edbb472549000161f567d07bb65ec8a55142a1969b9936996429152ede2bc86f" Dec 06 05:48:17 crc kubenswrapper[4706]: I1206 05:48:17.215639 4706 scope.go:117] "RemoveContainer" containerID="9c630b1cbb0551dc197ac120dbd3f9f8a443366146dd501cb355df1867c9de9b" Dec 06 05:48:17 crc kubenswrapper[4706]: I1206 05:48:17.217693 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-k8q9t"] Dec 06 05:48:17 crc kubenswrapper[4706]: I1206 05:48:17.228586 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-k8q9t"] Dec 06 05:48:17 crc kubenswrapper[4706]: I1206 05:48:17.256067 4706 scope.go:117] "RemoveContainer" containerID="e8f589bb200884dbbb3a08272a51d1ca0ea82b022c7318b8a598c989fb90d683" Dec 06 05:48:17 crc kubenswrapper[4706]: E1206 05:48:17.256557 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e8f589bb200884dbbb3a08272a51d1ca0ea82b022c7318b8a598c989fb90d683\": container with ID starting with e8f589bb200884dbbb3a08272a51d1ca0ea82b022c7318b8a598c989fb90d683 not found: ID does not exist" containerID="e8f589bb200884dbbb3a08272a51d1ca0ea82b022c7318b8a598c989fb90d683" Dec 06 05:48:17 crc kubenswrapper[4706]: I1206 05:48:17.256618 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e8f589bb200884dbbb3a08272a51d1ca0ea82b022c7318b8a598c989fb90d683"} err="failed to get container status \"e8f589bb200884dbbb3a08272a51d1ca0ea82b022c7318b8a598c989fb90d683\": rpc error: code = NotFound desc = could not find container \"e8f589bb200884dbbb3a08272a51d1ca0ea82b022c7318b8a598c989fb90d683\": container with ID starting with e8f589bb200884dbbb3a08272a51d1ca0ea82b022c7318b8a598c989fb90d683 not found: ID does not exist" Dec 06 05:48:17 crc kubenswrapper[4706]: I1206 05:48:17.256650 4706 scope.go:117] "RemoveContainer" containerID="edbb472549000161f567d07bb65ec8a55142a1969b9936996429152ede2bc86f" Dec 06 05:48:17 crc kubenswrapper[4706]: E1206 05:48:17.257015 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"edbb472549000161f567d07bb65ec8a55142a1969b9936996429152ede2bc86f\": container with ID starting with edbb472549000161f567d07bb65ec8a55142a1969b9936996429152ede2bc86f not found: ID does not exist" containerID="edbb472549000161f567d07bb65ec8a55142a1969b9936996429152ede2bc86f" Dec 06 05:48:17 crc kubenswrapper[4706]: I1206 05:48:17.257069 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"edbb472549000161f567d07bb65ec8a55142a1969b9936996429152ede2bc86f"} err="failed to get container status \"edbb472549000161f567d07bb65ec8a55142a1969b9936996429152ede2bc86f\": rpc error: code = NotFound desc = could not find container \"edbb472549000161f567d07bb65ec8a55142a1969b9936996429152ede2bc86f\": container with ID starting with edbb472549000161f567d07bb65ec8a55142a1969b9936996429152ede2bc86f not found: ID does not exist" Dec 06 05:48:17 crc kubenswrapper[4706]: I1206 05:48:17.257098 4706 scope.go:117] "RemoveContainer" 
containerID="9c630b1cbb0551dc197ac120dbd3f9f8a443366146dd501cb355df1867c9de9b" Dec 06 05:48:17 crc kubenswrapper[4706]: E1206 05:48:17.258235 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c630b1cbb0551dc197ac120dbd3f9f8a443366146dd501cb355df1867c9de9b\": container with ID starting with 9c630b1cbb0551dc197ac120dbd3f9f8a443366146dd501cb355df1867c9de9b not found: ID does not exist" containerID="9c630b1cbb0551dc197ac120dbd3f9f8a443366146dd501cb355df1867c9de9b" Dec 06 05:48:17 crc kubenswrapper[4706]: I1206 05:48:17.258274 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c630b1cbb0551dc197ac120dbd3f9f8a443366146dd501cb355df1867c9de9b"} err="failed to get container status \"9c630b1cbb0551dc197ac120dbd3f9f8a443366146dd501cb355df1867c9de9b\": rpc error: code = NotFound desc = could not find container \"9c630b1cbb0551dc197ac120dbd3f9f8a443366146dd501cb355df1867c9de9b\": container with ID starting with 9c630b1cbb0551dc197ac120dbd3f9f8a443366146dd501cb355df1867c9de9b not found: ID does not exist" Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.047842 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b91a7da-0909-4a62-b283-5c1cb74b3f5d" path="/var/lib/kubelet/pods/4b91a7da-0909-4a62-b283-5c1cb74b3f5d/volumes" Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.172469 4706 generic.go:334] "Generic (PLEG): container finished" podID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerID="1d9f04c2e00e3d9baf5d449142649a3ea1451e369225aa51d884d5dc3866af3b" exitCode=0 Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.172546 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a","Type":"ContainerDied","Data":"1d9f04c2e00e3d9baf5d449142649a3ea1451e369225aa51d884d5dc3866af3b"} Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.536888 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.694813 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-scripts\") pod \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.694874 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-sg-core-conf-yaml\") pod \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.694949 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cc69x\" (UniqueName: \"kubernetes.io/projected/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-kube-api-access-cc69x\") pod \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.695021 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-combined-ca-bundle\") pod \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.695177 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-log-httpd\") pod \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.695235 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-run-httpd\") pod \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.695266 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-config-data\") pod \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\" (UID: \"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a\") " Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.695726 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" (UID: "6c944d1e-e377-4c7c-9ffc-8deb9d1f271a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.695850 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" (UID: "6c944d1e-e377-4c7c-9ffc-8deb9d1f271a"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.702116 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-kube-api-access-cc69x" (OuterVolumeSpecName: "kube-api-access-cc69x") pod "6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" (UID: "6c944d1e-e377-4c7c-9ffc-8deb9d1f271a"). InnerVolumeSpecName "kube-api-access-cc69x". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.717919 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-scripts" (OuterVolumeSpecName: "scripts") pod "6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" (UID: "6c944d1e-e377-4c7c-9ffc-8deb9d1f271a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.722036 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" (UID: "6c944d1e-e377-4c7c-9ffc-8deb9d1f271a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.772024 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" (UID: "6c944d1e-e377-4c7c-9ffc-8deb9d1f271a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.795465 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-config-data" (OuterVolumeSpecName: "config-data") pod "6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" (UID: "6c944d1e-e377-4c7c-9ffc-8deb9d1f271a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.797545 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.797573 4706 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.797585 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cc69x\" (UniqueName: \"kubernetes.io/projected/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-kube-api-access-cc69x\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.797594 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.797602 4706 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.797611 4706 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:18 crc kubenswrapper[4706]: I1206 05:48:18.797619 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.036499 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:48:19 crc kubenswrapper[4706]: E1206 05:48:19.036759 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.185752 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c944d1e-e377-4c7c-9ffc-8deb9d1f271a","Type":"ContainerDied","Data":"71639dd5eb750978e06c25b77a05f413beb00d9295c23cfd33631cc8944b244f"} Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.185803 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.185816 4706 scope.go:117] "RemoveContainer" containerID="a81c3d69450aa91c8fe0be6b92fe67d9c81486bb34b3de3a7d07bc10cbe5d78c" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.188718 4706 generic.go:334] "Generic (PLEG): container finished" podID="1ff0bb4e-18a4-493e-a666-e94aa8bacea5" containerID="be2d46dba7aeff67ac6593d4426b40cbcef335e71dab6335d7a1824b22b0c3e3" exitCode=0 Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.188755 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-7gj9p" event={"ID":"1ff0bb4e-18a4-493e-a666-e94aa8bacea5","Type":"ContainerDied","Data":"be2d46dba7aeff67ac6593d4426b40cbcef335e71dab6335d7a1824b22b0c3e3"} Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.217409 4706 scope.go:117] "RemoveContainer" containerID="fc438c1010396ca7e81ae152c401c2a02f0d90d904c513173615633a351601b2" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.233622 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.258805 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.268090 4706 scope.go:117] "RemoveContainer" containerID="43e441eaeec72eca1e27c09f1a5ac0f91f546a7d2b8bf870a4f979a2f8f478eb" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.316679 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:48:19 crc kubenswrapper[4706]: E1206 05:48:19.317324 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b91a7da-0909-4a62-b283-5c1cb74b3f5d" containerName="registry-server" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.317343 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b91a7da-0909-4a62-b283-5c1cb74b3f5d" containerName="registry-server" Dec 06 05:48:19 crc kubenswrapper[4706]: E1206 05:48:19.317357 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerName="ceilometer-central-agent" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.317362 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerName="ceilometer-central-agent" Dec 06 05:48:19 crc kubenswrapper[4706]: E1206 05:48:19.317376 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerName="sg-core" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.317382 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerName="sg-core" Dec 06 05:48:19 crc kubenswrapper[4706]: E1206 05:48:19.317389 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerName="proxy-httpd" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.317394 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerName="proxy-httpd" Dec 06 05:48:19 crc kubenswrapper[4706]: E1206 05:48:19.317401 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b91a7da-0909-4a62-b283-5c1cb74b3f5d" containerName="extract-content" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.317407 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b91a7da-0909-4a62-b283-5c1cb74b3f5d" 
containerName="extract-content" Dec 06 05:48:19 crc kubenswrapper[4706]: E1206 05:48:19.317428 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerName="ceilometer-notification-agent" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.317433 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerName="ceilometer-notification-agent" Dec 06 05:48:19 crc kubenswrapper[4706]: E1206 05:48:19.317443 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b91a7da-0909-4a62-b283-5c1cb74b3f5d" containerName="extract-utilities" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.317449 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b91a7da-0909-4a62-b283-5c1cb74b3f5d" containerName="extract-utilities" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.317630 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerName="ceilometer-central-agent" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.317645 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b91a7da-0909-4a62-b283-5c1cb74b3f5d" containerName="registry-server" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.328686 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerName="ceilometer-notification-agent" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.328718 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerName="proxy-httpd" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.328730 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" containerName="sg-core" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.331106 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.331199 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.333391 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.333569 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.335284 4706 scope.go:117] "RemoveContainer" containerID="1d9f04c2e00e3d9baf5d449142649a3ea1451e369225aa51d884d5dc3866af3b" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.513950 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-log-httpd\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.514064 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-scripts\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.514089 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cp44\" (UniqueName: \"kubernetes.io/projected/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-kube-api-access-2cp44\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.514152 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-run-httpd\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.514178 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.514233 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-config-data\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.514311 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.615638 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-config-data\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc 
kubenswrapper[4706]: I1206 05:48:19.615700 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.615759 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-log-httpd\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.615803 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-scripts\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.615822 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cp44\" (UniqueName: \"kubernetes.io/projected/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-kube-api-access-2cp44\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.615889 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-run-httpd\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.615928 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.616322 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-log-httpd\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.616464 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-run-httpd\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.621936 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.621951 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-scripts\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.631186 4706 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-config-data\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.631664 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.633854 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cp44\" (UniqueName: \"kubernetes.io/projected/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-kube-api-access-2cp44\") pod \"ceilometer-0\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " pod="openstack/ceilometer-0" Dec 06 05:48:19 crc kubenswrapper[4706]: I1206 05:48:19.652842 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:48:20 crc kubenswrapper[4706]: I1206 05:48:20.045724 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c944d1e-e377-4c7c-9ffc-8deb9d1f271a" path="/var/lib/kubelet/pods/6c944d1e-e377-4c7c-9ffc-8deb9d1f271a/volumes" Dec 06 05:48:20 crc kubenswrapper[4706]: I1206 05:48:20.105738 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:48:20 crc kubenswrapper[4706]: I1206 05:48:20.198298 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb963d0c-6bd6-4e72-9ac8-1f66816271b6","Type":"ContainerStarted","Data":"df46ea90f8728d7294fee4e0c77dab88c19ef47a8a6170f8aed0aa862747ff54"} Dec 06 05:48:20 crc kubenswrapper[4706]: I1206 05:48:20.734156 4706 util.go:48] "No ready sandbox for pod can be found. 
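
The "Cleaned up orphaned pod volumes dir" entry just above is the tail end of pod deletion: once every volume under /var/lib/kubelet/pods/<uid>/volumes is unmounted, the kubelet removes the directory for UIDs that no longer correspond to any pod. A stdlib sketch of that scan; the directory layout matches the paths in the log, but the active-UID check and the dry-run behavior are placeholders:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // cleanupOrphanedPodDirs reports the volumes dir of every pod UID
    // under root that is not in the active set. Dry-run: prints only.
    func cleanupOrphanedPodDirs(root string, active map[string]bool) error {
        entries, err := os.ReadDir(root)
        if err != nil {
            return err
        }
        for _, e := range entries {
            if !e.IsDir() || active[e.Name()] {
                continue
            }
            volumes := filepath.Join(root, e.Name(), "volumes")
            if _, err := os.Stat(volumes); err == nil {
                fmt.Printf("Cleaned up orphaned pod volumes dir podUID=%q path=%q\n",
                    e.Name(), volumes)
                // A real cleanup would first verify nothing is still
                // mounted below this path, then os.RemoveAll(volumes).
            }
        }
        return nil
    }

    func main() {
        _ = cleanupOrphanedPodDirs("/var/lib/kubelet/pods", map[string]bool{})
    }
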
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-7gj9p" Dec 06 05:48:20 crc kubenswrapper[4706]: I1206 05:48:20.845120 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pm8gw\" (UniqueName: \"kubernetes.io/projected/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-kube-api-access-pm8gw\") pod \"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\" (UID: \"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\") " Dec 06 05:48:20 crc kubenswrapper[4706]: I1206 05:48:20.845630 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-config-data\") pod \"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\" (UID: \"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\") " Dec 06 05:48:20 crc kubenswrapper[4706]: I1206 05:48:20.845943 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-combined-ca-bundle\") pod \"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\" (UID: \"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\") " Dec 06 05:48:20 crc kubenswrapper[4706]: I1206 05:48:20.846097 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-scripts\") pod \"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\" (UID: \"1ff0bb4e-18a4-493e-a666-e94aa8bacea5\") " Dec 06 05:48:20 crc kubenswrapper[4706]: I1206 05:48:20.867487 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-scripts" (OuterVolumeSpecName: "scripts") pod "1ff0bb4e-18a4-493e-a666-e94aa8bacea5" (UID: "1ff0bb4e-18a4-493e-a666-e94aa8bacea5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:48:20 crc kubenswrapper[4706]: I1206 05:48:20.867640 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-kube-api-access-pm8gw" (OuterVolumeSpecName: "kube-api-access-pm8gw") pod "1ff0bb4e-18a4-493e-a666-e94aa8bacea5" (UID: "1ff0bb4e-18a4-493e-a666-e94aa8bacea5"). InnerVolumeSpecName "kube-api-access-pm8gw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:48:20 crc kubenswrapper[4706]: I1206 05:48:20.872459 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-config-data" (OuterVolumeSpecName: "config-data") pod "1ff0bb4e-18a4-493e-a666-e94aa8bacea5" (UID: "1ff0bb4e-18a4-493e-a666-e94aa8bacea5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:48:20 crc kubenswrapper[4706]: I1206 05:48:20.896339 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1ff0bb4e-18a4-493e-a666-e94aa8bacea5" (UID: "1ff0bb4e-18a4-493e-a666-e94aa8bacea5"). InnerVolumeSpecName "combined-ca-bundle". 
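
A teardown like the nova-cell0-conductor-db-sync-7gj9p one above is only finished when every one of the pod's volumes reaches the "Volume detached ... DevicePath \"\"" state reported next. One way to script that wait is to hold the pod's volume names as a set and drain it as detach events arrive; a minimal sketch with invented event plumbing:

    package main

    import "fmt"

    // waitDetached drains the set of expected volume names as
    // "Volume detached" events arrive, and reports when teardown is done.
    func waitDetached(expected []string, events <-chan string) {
        pending := make(map[string]bool, len(expected))
        for _, v := range expected {
            pending[v] = true
        }
        for vol := range events {
            if pending[vol] {
                delete(pending, vol)
                fmt.Printf("Volume detached for volume %q (%d left)\n", vol, len(pending))
            }
            if len(pending) == 0 {
                fmt.Println("all volumes detached; pod teardown complete")
                return
            }
        }
    }

    func main() {
        vols := []string{"combined-ca-bundle", "scripts", "kube-api-access-pm8gw", "config-data"}
        events := make(chan string, len(vols))
        for _, v := range vols {
            events <- v
        }
        close(events)
        waitDetached(vols, events)
    }
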
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:48:20 crc kubenswrapper[4706]: I1206 05:48:20.948518 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:20 crc kubenswrapper[4706]: I1206 05:48:20.948547 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:20 crc kubenswrapper[4706]: I1206 05:48:20.948556 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pm8gw\" (UniqueName: \"kubernetes.io/projected/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-kube-api-access-pm8gw\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:20 crc kubenswrapper[4706]: I1206 05:48:20.948567 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ff0bb4e-18a4-493e-a666-e94aa8bacea5-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.081513 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.208270 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-7gj9p" Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.208258 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-7gj9p" event={"ID":"1ff0bb4e-18a4-493e-a666-e94aa8bacea5","Type":"ContainerDied","Data":"d502ccec2fb9a3bc05cd386f98c8a951c9f10d03d6d69bdb25317d69d588a4da"} Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.208420 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d502ccec2fb9a3bc05cd386f98c8a951c9f10d03d6d69bdb25317d69d588a4da" Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.209737 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb963d0c-6bd6-4e72-9ac8-1f66816271b6","Type":"ContainerStarted","Data":"759d560b6fd88a2fd3aa9ebea1f5901810e6be98b8892967ff81978bc9d516cb"} Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.284809 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 06 05:48:21 crc kubenswrapper[4706]: E1206 05:48:21.285292 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ff0bb4e-18a4-493e-a666-e94aa8bacea5" containerName="nova-cell0-conductor-db-sync" Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.285315 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ff0bb4e-18a4-493e-a666-e94aa8bacea5" containerName="nova-cell0-conductor-db-sync" Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.285534 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ff0bb4e-18a4-493e-a666-e94aa8bacea5" containerName="nova-cell0-conductor-db-sync" Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.295234 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.298895 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.299870 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.299987 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-xfwtm" Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.456276 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crwxh\" (UniqueName: \"kubernetes.io/projected/1aea3621-829d-43f2-8986-a161b7c2e0ec-kube-api-access-crwxh\") pod \"nova-cell0-conductor-0\" (UID: \"1aea3621-829d-43f2-8986-a161b7c2e0ec\") " pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.456348 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aea3621-829d-43f2-8986-a161b7c2e0ec-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1aea3621-829d-43f2-8986-a161b7c2e0ec\") " pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.456504 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1aea3621-829d-43f2-8986-a161b7c2e0ec-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"1aea3621-829d-43f2-8986-a161b7c2e0ec\") " pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.558040 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crwxh\" (UniqueName: \"kubernetes.io/projected/1aea3621-829d-43f2-8986-a161b7c2e0ec-kube-api-access-crwxh\") pod \"nova-cell0-conductor-0\" (UID: \"1aea3621-829d-43f2-8986-a161b7c2e0ec\") " pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.558122 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aea3621-829d-43f2-8986-a161b7c2e0ec-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1aea3621-829d-43f2-8986-a161b7c2e0ec\") " pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.558215 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1aea3621-829d-43f2-8986-a161b7c2e0ec-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"1aea3621-829d-43f2-8986-a161b7c2e0ec\") " pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.562483 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aea3621-829d-43f2-8986-a161b7c2e0ec-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1aea3621-829d-43f2-8986-a161b7c2e0ec\") " pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.562688 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1aea3621-829d-43f2-8986-a161b7c2e0ec-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"1aea3621-829d-43f2-8986-a161b7c2e0ec\") " pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.577970 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-crwxh\" (UniqueName: \"kubernetes.io/projected/1aea3621-829d-43f2-8986-a161b7c2e0ec-kube-api-access-crwxh\") pod \"nova-cell0-conductor-0\" (UID: \"1aea3621-829d-43f2-8986-a161b7c2e0ec\") " pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:21 crc kubenswrapper[4706]: I1206 05:48:21.611335 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:22 crc kubenswrapper[4706]: I1206 05:48:22.715669 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 06 05:48:23 crc kubenswrapper[4706]: I1206 05:48:23.230445 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb963d0c-6bd6-4e72-9ac8-1f66816271b6","Type":"ContainerStarted","Data":"fce0861a6a3a05699b020fa9c85d8644bf82b047e7349b6bd8dcff163f2104cc"} Dec 06 05:48:23 crc kubenswrapper[4706]: I1206 05:48:23.250756 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 06 05:48:24 crc kubenswrapper[4706]: I1206 05:48:24.241617 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1aea3621-829d-43f2-8986-a161b7c2e0ec","Type":"ContainerStarted","Data":"ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9"} Dec 06 05:48:24 crc kubenswrapper[4706]: I1206 05:48:24.241952 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1aea3621-829d-43f2-8986-a161b7c2e0ec","Type":"ContainerStarted","Data":"7ff9e05422876a9b4099d8a543ab7b08b5e295f4358bf1f8016b34df5e400f48"} Dec 06 05:48:24 crc kubenswrapper[4706]: I1206 05:48:24.241862 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="1aea3621-829d-43f2-8986-a161b7c2e0ec" containerName="nova-cell0-conductor-conductor" containerID="cri-o://ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" gracePeriod=30 Dec 06 05:48:24 crc kubenswrapper[4706]: I1206 05:48:24.243713 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:24 crc kubenswrapper[4706]: I1206 05:48:24.247134 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb963d0c-6bd6-4e72-9ac8-1f66816271b6","Type":"ContainerStarted","Data":"0accc67c4a7032df96fc03bbdcead2034fc1d7595427db77a465ff3ab9cace61"} Dec 06 05:48:24 crc kubenswrapper[4706]: I1206 05:48:24.262107 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=3.262090087 podStartE2EDuration="3.262090087s" podCreationTimestamp="2025-12-06 05:48:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:48:24.25885876 +0000 UTC m=+1726.586682704" watchObservedRunningTime="2025-12-06 05:48:24.262090087 +0000 UTC m=+1726.589914021" Dec 06 05:48:26 crc kubenswrapper[4706]: I1206 05:48:26.268305 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"bb963d0c-6bd6-4e72-9ac8-1f66816271b6","Type":"ContainerStarted","Data":"7758c53d7dde6a3fcea47465123628b23848ccffa7cf9ce2fa86192a82fe9ac5"} Dec 06 05:48:26 crc kubenswrapper[4706]: I1206 05:48:26.268939 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 06 05:48:26 crc kubenswrapper[4706]: I1206 05:48:26.268638 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerName="sg-core" containerID="cri-o://0accc67c4a7032df96fc03bbdcead2034fc1d7595427db77a465ff3ab9cace61" gracePeriod=30 Dec 06 05:48:26 crc kubenswrapper[4706]: I1206 05:48:26.268712 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerName="proxy-httpd" containerID="cri-o://7758c53d7dde6a3fcea47465123628b23848ccffa7cf9ce2fa86192a82fe9ac5" gracePeriod=30 Dec 06 05:48:26 crc kubenswrapper[4706]: I1206 05:48:26.268739 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerName="ceilometer-notification-agent" containerID="cri-o://fce0861a6a3a05699b020fa9c85d8644bf82b047e7349b6bd8dcff163f2104cc" gracePeriod=30 Dec 06 05:48:26 crc kubenswrapper[4706]: I1206 05:48:26.268552 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerName="ceilometer-central-agent" containerID="cri-o://759d560b6fd88a2fd3aa9ebea1f5901810e6be98b8892967ff81978bc9d516cb" gracePeriod=30 Dec 06 05:48:26 crc kubenswrapper[4706]: I1206 05:48:26.304154 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.24556691 podStartE2EDuration="7.304132861s" podCreationTimestamp="2025-12-06 05:48:19 +0000 UTC" firstStartedPulling="2025-12-06 05:48:20.111816564 +0000 UTC m=+1722.439640508" lastFinishedPulling="2025-12-06 05:48:25.170382515 +0000 UTC m=+1727.498206459" observedRunningTime="2025-12-06 05:48:26.291763817 +0000 UTC m=+1728.619587781" watchObservedRunningTime="2025-12-06 05:48:26.304132861 +0000 UTC m=+1728.631956815" Dec 06 05:48:27 crc kubenswrapper[4706]: I1206 05:48:27.279613 4706 generic.go:334] "Generic (PLEG): container finished" podID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerID="7758c53d7dde6a3fcea47465123628b23848ccffa7cf9ce2fa86192a82fe9ac5" exitCode=0 Dec 06 05:48:27 crc kubenswrapper[4706]: I1206 05:48:27.280001 4706 generic.go:334] "Generic (PLEG): container finished" podID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerID="0accc67c4a7032df96fc03bbdcead2034fc1d7595427db77a465ff3ab9cace61" exitCode=2 Dec 06 05:48:27 crc kubenswrapper[4706]: I1206 05:48:27.279683 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb963d0c-6bd6-4e72-9ac8-1f66816271b6","Type":"ContainerDied","Data":"7758c53d7dde6a3fcea47465123628b23848ccffa7cf9ce2fa86192a82fe9ac5"} Dec 06 05:48:27 crc kubenswrapper[4706]: I1206 05:48:27.280062 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb963d0c-6bd6-4e72-9ac8-1f66816271b6","Type":"ContainerDied","Data":"0accc67c4a7032df96fc03bbdcead2034fc1d7595427db77a465ff3ab9cace61"} Dec 06 05:48:27 crc kubenswrapper[4706]: I1206 05:48:27.280077 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ceilometer-0" event={"ID":"bb963d0c-6bd6-4e72-9ac8-1f66816271b6","Type":"ContainerDied","Data":"fce0861a6a3a05699b020fa9c85d8644bf82b047e7349b6bd8dcff163f2104cc"} Dec 06 05:48:27 crc kubenswrapper[4706]: I1206 05:48:27.280015 4706 generic.go:334] "Generic (PLEG): container finished" podID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerID="fce0861a6a3a05699b020fa9c85d8644bf82b047e7349b6bd8dcff163f2104cc" exitCode=0 Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.124762 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.248761 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2cp44\" (UniqueName: \"kubernetes.io/projected/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-kube-api-access-2cp44\") pod \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.248812 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-combined-ca-bundle\") pod \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.248886 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-run-httpd\") pod \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.248922 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-scripts\") pod \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.248942 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-sg-core-conf-yaml\") pod \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.249109 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-config-data\") pod \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.249140 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-log-httpd\") pod \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\" (UID: \"bb963d0c-6bd6-4e72-9ac8-1f66816271b6\") " Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.249557 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "bb963d0c-6bd6-4e72-9ac8-1f66816271b6" (UID: "bb963d0c-6bd6-4e72-9ac8-1f66816271b6"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.249749 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "bb963d0c-6bd6-4e72-9ac8-1f66816271b6" (UID: "bb963d0c-6bd6-4e72-9ac8-1f66816271b6"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.249866 4706 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.254685 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-kube-api-access-2cp44" (OuterVolumeSpecName: "kube-api-access-2cp44") pod "bb963d0c-6bd6-4e72-9ac8-1f66816271b6" (UID: "bb963d0c-6bd6-4e72-9ac8-1f66816271b6"). InnerVolumeSpecName "kube-api-access-2cp44". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.255104 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-scripts" (OuterVolumeSpecName: "scripts") pod "bb963d0c-6bd6-4e72-9ac8-1f66816271b6" (UID: "bb963d0c-6bd6-4e72-9ac8-1f66816271b6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.283282 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "bb963d0c-6bd6-4e72-9ac8-1f66816271b6" (UID: "bb963d0c-6bd6-4e72-9ac8-1f66816271b6"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.316781 4706 generic.go:334] "Generic (PLEG): container finished" podID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerID="759d560b6fd88a2fd3aa9ebea1f5901810e6be98b8892967ff81978bc9d516cb" exitCode=0 Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.316839 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb963d0c-6bd6-4e72-9ac8-1f66816271b6","Type":"ContainerDied","Data":"759d560b6fd88a2fd3aa9ebea1f5901810e6be98b8892967ff81978bc9d516cb"} Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.316900 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bb963d0c-6bd6-4e72-9ac8-1f66816271b6","Type":"ContainerDied","Data":"df46ea90f8728d7294fee4e0c77dab88c19ef47a8a6170f8aed0aa862747ff54"} Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.316901 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.316920 4706 scope.go:117] "RemoveContainer" containerID="7758c53d7dde6a3fcea47465123628b23848ccffa7cf9ce2fa86192a82fe9ac5" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.329567 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bb963d0c-6bd6-4e72-9ac8-1f66816271b6" (UID: "bb963d0c-6bd6-4e72-9ac8-1f66816271b6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.348440 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-config-data" (OuterVolumeSpecName: "config-data") pod "bb963d0c-6bd6-4e72-9ac8-1f66816271b6" (UID: "bb963d0c-6bd6-4e72-9ac8-1f66816271b6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.351601 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.351628 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2cp44\" (UniqueName: \"kubernetes.io/projected/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-kube-api-access-2cp44\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.351639 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.351650 4706 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.351659 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.351668 4706 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bb963d0c-6bd6-4e72-9ac8-1f66816271b6-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.431493 4706 scope.go:117] "RemoveContainer" containerID="0accc67c4a7032df96fc03bbdcead2034fc1d7595427db77a465ff3ab9cace61" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.453163 4706 scope.go:117] "RemoveContainer" containerID="fce0861a6a3a05699b020fa9c85d8644bf82b047e7349b6bd8dcff163f2104cc" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.476670 4706 scope.go:117] "RemoveContainer" containerID="759d560b6fd88a2fd3aa9ebea1f5901810e6be98b8892967ff81978bc9d516cb" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.494469 4706 scope.go:117] "RemoveContainer" containerID="7758c53d7dde6a3fcea47465123628b23848ccffa7cf9ce2fa86192a82fe9ac5" Dec 06 05:48:30 crc kubenswrapper[4706]: E1206 05:48:30.494937 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc 
error: code = NotFound desc = could not find container \"7758c53d7dde6a3fcea47465123628b23848ccffa7cf9ce2fa86192a82fe9ac5\": container with ID starting with 7758c53d7dde6a3fcea47465123628b23848ccffa7cf9ce2fa86192a82fe9ac5 not found: ID does not exist" containerID="7758c53d7dde6a3fcea47465123628b23848ccffa7cf9ce2fa86192a82fe9ac5" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.494990 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7758c53d7dde6a3fcea47465123628b23848ccffa7cf9ce2fa86192a82fe9ac5"} err="failed to get container status \"7758c53d7dde6a3fcea47465123628b23848ccffa7cf9ce2fa86192a82fe9ac5\": rpc error: code = NotFound desc = could not find container \"7758c53d7dde6a3fcea47465123628b23848ccffa7cf9ce2fa86192a82fe9ac5\": container with ID starting with 7758c53d7dde6a3fcea47465123628b23848ccffa7cf9ce2fa86192a82fe9ac5 not found: ID does not exist" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.495023 4706 scope.go:117] "RemoveContainer" containerID="0accc67c4a7032df96fc03bbdcead2034fc1d7595427db77a465ff3ab9cace61" Dec 06 05:48:30 crc kubenswrapper[4706]: E1206 05:48:30.495516 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0accc67c4a7032df96fc03bbdcead2034fc1d7595427db77a465ff3ab9cace61\": container with ID starting with 0accc67c4a7032df96fc03bbdcead2034fc1d7595427db77a465ff3ab9cace61 not found: ID does not exist" containerID="0accc67c4a7032df96fc03bbdcead2034fc1d7595427db77a465ff3ab9cace61" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.495563 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0accc67c4a7032df96fc03bbdcead2034fc1d7595427db77a465ff3ab9cace61"} err="failed to get container status \"0accc67c4a7032df96fc03bbdcead2034fc1d7595427db77a465ff3ab9cace61\": rpc error: code = NotFound desc = could not find container \"0accc67c4a7032df96fc03bbdcead2034fc1d7595427db77a465ff3ab9cace61\": container with ID starting with 0accc67c4a7032df96fc03bbdcead2034fc1d7595427db77a465ff3ab9cace61 not found: ID does not exist" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.495587 4706 scope.go:117] "RemoveContainer" containerID="fce0861a6a3a05699b020fa9c85d8644bf82b047e7349b6bd8dcff163f2104cc" Dec 06 05:48:30 crc kubenswrapper[4706]: E1206 05:48:30.495964 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fce0861a6a3a05699b020fa9c85d8644bf82b047e7349b6bd8dcff163f2104cc\": container with ID starting with fce0861a6a3a05699b020fa9c85d8644bf82b047e7349b6bd8dcff163f2104cc not found: ID does not exist" containerID="fce0861a6a3a05699b020fa9c85d8644bf82b047e7349b6bd8dcff163f2104cc" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.496002 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fce0861a6a3a05699b020fa9c85d8644bf82b047e7349b6bd8dcff163f2104cc"} err="failed to get container status \"fce0861a6a3a05699b020fa9c85d8644bf82b047e7349b6bd8dcff163f2104cc\": rpc error: code = NotFound desc = could not find container \"fce0861a6a3a05699b020fa9c85d8644bf82b047e7349b6bd8dcff163f2104cc\": container with ID starting with fce0861a6a3a05699b020fa9c85d8644bf82b047e7349b6bd8dcff163f2104cc not found: ID does not exist" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.496022 4706 scope.go:117] "RemoveContainer" 
containerID="759d560b6fd88a2fd3aa9ebea1f5901810e6be98b8892967ff81978bc9d516cb" Dec 06 05:48:30 crc kubenswrapper[4706]: E1206 05:48:30.496512 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"759d560b6fd88a2fd3aa9ebea1f5901810e6be98b8892967ff81978bc9d516cb\": container with ID starting with 759d560b6fd88a2fd3aa9ebea1f5901810e6be98b8892967ff81978bc9d516cb not found: ID does not exist" containerID="759d560b6fd88a2fd3aa9ebea1f5901810e6be98b8892967ff81978bc9d516cb" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.496540 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"759d560b6fd88a2fd3aa9ebea1f5901810e6be98b8892967ff81978bc9d516cb"} err="failed to get container status \"759d560b6fd88a2fd3aa9ebea1f5901810e6be98b8892967ff81978bc9d516cb\": rpc error: code = NotFound desc = could not find container \"759d560b6fd88a2fd3aa9ebea1f5901810e6be98b8892967ff81978bc9d516cb\": container with ID starting with 759d560b6fd88a2fd3aa9ebea1f5901810e6be98b8892967ff81978bc9d516cb not found: ID does not exist" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.680138 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.694223 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.706225 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:48:30 crc kubenswrapper[4706]: E1206 05:48:30.706654 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerName="proxy-httpd" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.706674 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerName="proxy-httpd" Dec 06 05:48:30 crc kubenswrapper[4706]: E1206 05:48:30.706702 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerName="ceilometer-central-agent" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.706711 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerName="ceilometer-central-agent" Dec 06 05:48:30 crc kubenswrapper[4706]: E1206 05:48:30.706730 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerName="ceilometer-notification-agent" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.706738 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerName="ceilometer-notification-agent" Dec 06 05:48:30 crc kubenswrapper[4706]: E1206 05:48:30.706756 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerName="sg-core" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.706763 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerName="sg-core" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.706967 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerName="ceilometer-central-agent" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.706986 4706 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerName="ceilometer-notification-agent" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.706997 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerName="sg-core" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.707027 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" containerName="proxy-httpd" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.709140 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.713726 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.713815 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.723654 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.858914 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-config-data\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.858954 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-run-httpd\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.858986 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.859060 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqpsg\" (UniqueName: \"kubernetes.io/projected/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-kube-api-access-gqpsg\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.859092 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-log-httpd\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.859124 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-scripts\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.859170 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" 
(UniqueName: \"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.960853 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqpsg\" (UniqueName: \"kubernetes.io/projected/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-kube-api-access-gqpsg\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.960931 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-log-httpd\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.960977 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-scripts\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.961041 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.961096 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-config-data\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.961147 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-run-httpd\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.961179 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.962618 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-log-httpd\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.962975 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-run-httpd\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.966418 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.967447 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.967613 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-scripts\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.967933 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-config-data\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:30 crc kubenswrapper[4706]: I1206 05:48:30.981777 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqpsg\" (UniqueName: \"kubernetes.io/projected/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-kube-api-access-gqpsg\") pod \"ceilometer-0\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " pod="openstack/ceilometer-0" Dec 06 05:48:31 crc kubenswrapper[4706]: I1206 05:48:31.039163 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:48:31 crc kubenswrapper[4706]: I1206 05:48:31.474681 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:48:31 crc kubenswrapper[4706]: W1206 05:48:31.475430 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1cc02a95_a453_4485_a066_9fe2ecfcc3c5.slice/crio-ef3484541eb3f8107579c1f24c33af9bf12d39defdf96d7bcffb8714fbeb048d WatchSource:0}: Error finding container ef3484541eb3f8107579c1f24c33af9bf12d39defdf96d7bcffb8714fbeb048d: Status 404 returned error can't find the container with id ef3484541eb3f8107579c1f24c33af9bf12d39defdf96d7bcffb8714fbeb048d Dec 06 05:48:31 crc kubenswrapper[4706]: E1206 05:48:31.614493 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 06 05:48:31 crc kubenswrapper[4706]: E1206 05:48:31.616140 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 06 05:48:31 crc kubenswrapper[4706]: E1206 05:48:31.617398 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" 
cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 06 05:48:31 crc kubenswrapper[4706]: E1206 05:48:31.617466 4706 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="1aea3621-829d-43f2-8986-a161b7c2e0ec" containerName="nova-cell0-conductor-conductor" Dec 06 05:48:32 crc kubenswrapper[4706]: I1206 05:48:32.069525 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb963d0c-6bd6-4e72-9ac8-1f66816271b6" path="/var/lib/kubelet/pods/bb963d0c-6bd6-4e72-9ac8-1f66816271b6/volumes" Dec 06 05:48:32 crc kubenswrapper[4706]: I1206 05:48:32.338093 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cc02a95-a453-4485-a066-9fe2ecfcc3c5","Type":"ContainerStarted","Data":"475bda868e17a0e6b46d62d8c1944b4bdee84c33f4c517a7be11e819b561b717"} Dec 06 05:48:32 crc kubenswrapper[4706]: I1206 05:48:32.338141 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cc02a95-a453-4485-a066-9fe2ecfcc3c5","Type":"ContainerStarted","Data":"ef3484541eb3f8107579c1f24c33af9bf12d39defdf96d7bcffb8714fbeb048d"} Dec 06 05:48:33 crc kubenswrapper[4706]: I1206 05:48:33.036399 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:48:33 crc kubenswrapper[4706]: E1206 05:48:33.037057 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:48:33 crc kubenswrapper[4706]: I1206 05:48:33.393142 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cc02a95-a453-4485-a066-9fe2ecfcc3c5","Type":"ContainerStarted","Data":"96efc7cf41accb1ec53c024abfd3315d8d301f5c71385799d22b1cca9217cfd0"} Dec 06 05:48:34 crc kubenswrapper[4706]: I1206 05:48:34.404154 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cc02a95-a453-4485-a066-9fe2ecfcc3c5","Type":"ContainerStarted","Data":"011612a1717aab271f875fa95d5b5e62ab4c13f2052a112de429132626a14e4b"} Dec 06 05:48:35 crc kubenswrapper[4706]: I1206 05:48:35.414640 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cc02a95-a453-4485-a066-9fe2ecfcc3c5","Type":"ContainerStarted","Data":"73e5e3669e46a8ffa63177845a0d9a370cc014f56dd047af60fdcb1b35583456"} Dec 06 05:48:35 crc kubenswrapper[4706]: I1206 05:48:35.414958 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 06 05:48:35 crc kubenswrapper[4706]: I1206 05:48:35.438920 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.179648008 podStartE2EDuration="5.438892853s" podCreationTimestamp="2025-12-06 05:48:30 +0000 UTC" firstStartedPulling="2025-12-06 05:48:31.478898399 +0000 UTC m=+1733.806722343" lastFinishedPulling="2025-12-06 05:48:34.738143244 +0000 UTC m=+1737.065967188" observedRunningTime="2025-12-06 05:48:35.431973967 +0000 UTC m=+1737.759797951" 
watchObservedRunningTime="2025-12-06 05:48:35.438892853 +0000 UTC m=+1737.766716837" Dec 06 05:48:36 crc kubenswrapper[4706]: E1206 05:48:36.614860 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 06 05:48:36 crc kubenswrapper[4706]: E1206 05:48:36.617192 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 06 05:48:36 crc kubenswrapper[4706]: E1206 05:48:36.619109 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 06 05:48:36 crc kubenswrapper[4706]: E1206 05:48:36.619146 4706 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="1aea3621-829d-43f2-8986-a161b7c2e0ec" containerName="nova-cell0-conductor-conductor" Dec 06 05:48:41 crc kubenswrapper[4706]: E1206 05:48:41.613780 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 06 05:48:41 crc kubenswrapper[4706]: E1206 05:48:41.616013 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 06 05:48:41 crc kubenswrapper[4706]: E1206 05:48:41.617117 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 06 05:48:41 crc kubenswrapper[4706]: E1206 05:48:41.617158 4706 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="1aea3621-829d-43f2-8986-a161b7c2e0ec" containerName="nova-cell0-conductor-conductor" Dec 06 05:48:44 crc kubenswrapper[4706]: I1206 05:48:44.037092 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:48:44 crc kubenswrapper[4706]: E1206 05:48:44.037628 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" 
for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:48:46 crc kubenswrapper[4706]: E1206 05:48:46.613254 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 06 05:48:46 crc kubenswrapper[4706]: E1206 05:48:46.614881 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 06 05:48:46 crc kubenswrapper[4706]: E1206 05:48:46.616145 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 06 05:48:46 crc kubenswrapper[4706]: E1206 05:48:46.616192 4706 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="1aea3621-829d-43f2-8986-a161b7c2e0ec" containerName="nova-cell0-conductor-conductor" Dec 06 05:48:51 crc kubenswrapper[4706]: E1206 05:48:51.614590 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 06 05:48:51 crc kubenswrapper[4706]: E1206 05:48:51.618350 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 06 05:48:51 crc kubenswrapper[4706]: E1206 05:48:51.620485 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 06 05:48:51 crc kubenswrapper[4706]: E1206 05:48:51.620566 4706 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="1aea3621-829d-43f2-8986-a161b7c2e0ec" containerName="nova-cell0-conductor-conductor" Dec 06 05:48:55 crc kubenswrapper[4706]: I1206 05:48:55.035703 4706 
scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:48:55 crc kubenswrapper[4706]: E1206 05:48:55.036618 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:48:55 crc kubenswrapper[4706]: I1206 05:48:55.609124 4706 generic.go:334] "Generic (PLEG): container finished" podID="1aea3621-829d-43f2-8986-a161b7c2e0ec" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" exitCode=137 Dec 06 05:48:55 crc kubenswrapper[4706]: I1206 05:48:55.609219 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1aea3621-829d-43f2-8986-a161b7c2e0ec","Type":"ContainerDied","Data":"ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9"} Dec 06 05:48:56 crc kubenswrapper[4706]: E1206 05:48:56.613014 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9 is running failed: container process not found" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 06 05:48:56 crc kubenswrapper[4706]: E1206 05:48:56.613715 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9 is running failed: container process not found" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 06 05:48:56 crc kubenswrapper[4706]: E1206 05:48:56.614228 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9 is running failed: container process not found" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Dec 06 05:48:56 crc kubenswrapper[4706]: E1206 05:48:56.614303 4706 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="1aea3621-829d-43f2-8986-a161b7c2e0ec" containerName="nova-cell0-conductor-conductor" Dec 06 05:48:56 crc kubenswrapper[4706]: I1206 05:48:56.953383 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.067710 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aea3621-829d-43f2-8986-a161b7c2e0ec-combined-ca-bundle\") pod \"1aea3621-829d-43f2-8986-a161b7c2e0ec\" (UID: \"1aea3621-829d-43f2-8986-a161b7c2e0ec\") " Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.067829 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1aea3621-829d-43f2-8986-a161b7c2e0ec-config-data\") pod \"1aea3621-829d-43f2-8986-a161b7c2e0ec\" (UID: \"1aea3621-829d-43f2-8986-a161b7c2e0ec\") " Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.067937 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-crwxh\" (UniqueName: \"kubernetes.io/projected/1aea3621-829d-43f2-8986-a161b7c2e0ec-kube-api-access-crwxh\") pod \"1aea3621-829d-43f2-8986-a161b7c2e0ec\" (UID: \"1aea3621-829d-43f2-8986-a161b7c2e0ec\") " Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.073920 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1aea3621-829d-43f2-8986-a161b7c2e0ec-kube-api-access-crwxh" (OuterVolumeSpecName: "kube-api-access-crwxh") pod "1aea3621-829d-43f2-8986-a161b7c2e0ec" (UID: "1aea3621-829d-43f2-8986-a161b7c2e0ec"). InnerVolumeSpecName "kube-api-access-crwxh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.093259 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1aea3621-829d-43f2-8986-a161b7c2e0ec-config-data" (OuterVolumeSpecName: "config-data") pod "1aea3621-829d-43f2-8986-a161b7c2e0ec" (UID: "1aea3621-829d-43f2-8986-a161b7c2e0ec"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.100826 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1aea3621-829d-43f2-8986-a161b7c2e0ec-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1aea3621-829d-43f2-8986-a161b7c2e0ec" (UID: "1aea3621-829d-43f2-8986-a161b7c2e0ec"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.169641 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1aea3621-829d-43f2-8986-a161b7c2e0ec-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.169673 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-crwxh\" (UniqueName: \"kubernetes.io/projected/1aea3621-829d-43f2-8986-a161b7c2e0ec-kube-api-access-crwxh\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.169685 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1aea3621-829d-43f2-8986-a161b7c2e0ec-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.630448 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1aea3621-829d-43f2-8986-a161b7c2e0ec","Type":"ContainerDied","Data":"7ff9e05422876a9b4099d8a543ab7b08b5e295f4358bf1f8016b34df5e400f48"} Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.630498 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.631025 4706 scope.go:117] "RemoveContainer" containerID="ddc0a79dae2c71b344becd84edb231876d7c096c9c37c5b925786093e72c10c9" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.667960 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.677993 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.690962 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 06 05:48:57 crc kubenswrapper[4706]: E1206 05:48:57.691401 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1aea3621-829d-43f2-8986-a161b7c2e0ec" containerName="nova-cell0-conductor-conductor" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.691420 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="1aea3621-829d-43f2-8986-a161b7c2e0ec" containerName="nova-cell0-conductor-conductor" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.691591 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="1aea3621-829d-43f2-8986-a161b7c2e0ec" containerName="nova-cell0-conductor-conductor" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.692292 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.693790 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.694022 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-xfwtm" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.700267 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.780086 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwhcj\" (UniqueName: \"kubernetes.io/projected/635ff13a-9863-4ae2-84df-78df1c359b9e-kube-api-access-kwhcj\") pod \"nova-cell0-conductor-0\" (UID: \"635ff13a-9863-4ae2-84df-78df1c359b9e\") " pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.780145 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/635ff13a-9863-4ae2-84df-78df1c359b9e-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"635ff13a-9863-4ae2-84df-78df1c359b9e\") " pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.780233 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/635ff13a-9863-4ae2-84df-78df1c359b9e-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"635ff13a-9863-4ae2-84df-78df1c359b9e\") " pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.882436 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwhcj\" (UniqueName: \"kubernetes.io/projected/635ff13a-9863-4ae2-84df-78df1c359b9e-kube-api-access-kwhcj\") pod \"nova-cell0-conductor-0\" (UID: \"635ff13a-9863-4ae2-84df-78df1c359b9e\") " pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.882495 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/635ff13a-9863-4ae2-84df-78df1c359b9e-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"635ff13a-9863-4ae2-84df-78df1c359b9e\") " pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.882566 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/635ff13a-9863-4ae2-84df-78df1c359b9e-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"635ff13a-9863-4ae2-84df-78df1c359b9e\") " pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.890206 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/635ff13a-9863-4ae2-84df-78df1c359b9e-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"635ff13a-9863-4ae2-84df-78df1c359b9e\") " pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.890268 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/635ff13a-9863-4ae2-84df-78df1c359b9e-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"635ff13a-9863-4ae2-84df-78df1c359b9e\") " pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:57 crc kubenswrapper[4706]: I1206 05:48:57.898382 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwhcj\" (UniqueName: \"kubernetes.io/projected/635ff13a-9863-4ae2-84df-78df1c359b9e-kube-api-access-kwhcj\") pod \"nova-cell0-conductor-0\" (UID: \"635ff13a-9863-4ae2-84df-78df1c359b9e\") " pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:58 crc kubenswrapper[4706]: I1206 05:48:58.019443 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:58 crc kubenswrapper[4706]: I1206 05:48:58.050312 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1aea3621-829d-43f2-8986-a161b7c2e0ec" path="/var/lib/kubelet/pods/1aea3621-829d-43f2-8986-a161b7c2e0ec/volumes" Dec 06 05:48:58 crc kubenswrapper[4706]: I1206 05:48:58.451293 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Dec 06 05:48:58 crc kubenswrapper[4706]: I1206 05:48:58.644256 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"635ff13a-9863-4ae2-84df-78df1c359b9e","Type":"ContainerStarted","Data":"0da7513cf031566b9a4885f2445bae27f3c764b8f99eada4b185bc3ea673685e"} Dec 06 05:48:59 crc kubenswrapper[4706]: I1206 05:48:59.659968 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"635ff13a-9863-4ae2-84df-78df1c359b9e","Type":"ContainerStarted","Data":"5bb9fffc4bec3369afde7502a1fbfbc7592a5ad57701e69154c1322e90b34038"} Dec 06 05:48:59 crc kubenswrapper[4706]: I1206 05:48:59.660444 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Dec 06 05:48:59 crc kubenswrapper[4706]: I1206 05:48:59.682537 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.682513772 podStartE2EDuration="2.682513772s" podCreationTimestamp="2025-12-06 05:48:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:48:59.674623049 +0000 UTC m=+1762.002447063" watchObservedRunningTime="2025-12-06 05:48:59.682513772 +0000 UTC m=+1762.010337756" Dec 06 05:49:01 crc kubenswrapper[4706]: I1206 05:49:01.043308 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 06 05:49:03 crc kubenswrapper[4706]: I1206 05:49:03.050227 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.214907 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-dcfr8"] Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.220687 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-dcfr8" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.224115 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.225894 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.229332 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-dcfr8"] Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.376179 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.377647 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.379903 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.407809 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.420233 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6e7005d-f57b-4d7c-a421-a259700fa0ad-config-data\") pod \"nova-cell0-cell-mapping-dcfr8\" (UID: \"b6e7005d-f57b-4d7c-a421-a259700fa0ad\") " pod="openstack/nova-cell0-cell-mapping-dcfr8" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.420273 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718deb27-0684-4cb2-8d09-60b73d7ed6d8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"718deb27-0684-4cb2-8d09-60b73d7ed6d8\") " pod="openstack/nova-scheduler-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.420307 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/718deb27-0684-4cb2-8d09-60b73d7ed6d8-config-data\") pod \"nova-scheduler-0\" (UID: \"718deb27-0684-4cb2-8d09-60b73d7ed6d8\") " pod="openstack/nova-scheduler-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.420329 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cdkl2\" (UniqueName: \"kubernetes.io/projected/b6e7005d-f57b-4d7c-a421-a259700fa0ad-kube-api-access-cdkl2\") pod \"nova-cell0-cell-mapping-dcfr8\" (UID: \"b6e7005d-f57b-4d7c-a421-a259700fa0ad\") " pod="openstack/nova-cell0-cell-mapping-dcfr8" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.420347 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6e7005d-f57b-4d7c-a421-a259700fa0ad-scripts\") pod \"nova-cell0-cell-mapping-dcfr8\" (UID: \"b6e7005d-f57b-4d7c-a421-a259700fa0ad\") " pod="openstack/nova-cell0-cell-mapping-dcfr8" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.420363 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6e7005d-f57b-4d7c-a421-a259700fa0ad-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-dcfr8\" (UID: 
\"b6e7005d-f57b-4d7c-a421-a259700fa0ad\") " pod="openstack/nova-cell0-cell-mapping-dcfr8" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.420412 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p84cv\" (UniqueName: \"kubernetes.io/projected/718deb27-0684-4cb2-8d09-60b73d7ed6d8-kube-api-access-p84cv\") pod \"nova-scheduler-0\" (UID: \"718deb27-0684-4cb2-8d09-60b73d7ed6d8\") " pod="openstack/nova-scheduler-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.446089 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.447812 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.453132 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.499490 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.522144 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dcecbbd-fabb-49a7-991c-073c0f1734cf-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5dcecbbd-fabb-49a7-991c-073c0f1734cf\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.522241 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2992k\" (UniqueName: \"kubernetes.io/projected/5dcecbbd-fabb-49a7-991c-073c0f1734cf-kube-api-access-2992k\") pod \"nova-cell1-novncproxy-0\" (UID: \"5dcecbbd-fabb-49a7-991c-073c0f1734cf\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.522276 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6e7005d-f57b-4d7c-a421-a259700fa0ad-config-data\") pod \"nova-cell0-cell-mapping-dcfr8\" (UID: \"b6e7005d-f57b-4d7c-a421-a259700fa0ad\") " pod="openstack/nova-cell0-cell-mapping-dcfr8" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.522301 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718deb27-0684-4cb2-8d09-60b73d7ed6d8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"718deb27-0684-4cb2-8d09-60b73d7ed6d8\") " pod="openstack/nova-scheduler-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.522379 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/718deb27-0684-4cb2-8d09-60b73d7ed6d8-config-data\") pod \"nova-scheduler-0\" (UID: \"718deb27-0684-4cb2-8d09-60b73d7ed6d8\") " pod="openstack/nova-scheduler-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.522403 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cdkl2\" (UniqueName: \"kubernetes.io/projected/b6e7005d-f57b-4d7c-a421-a259700fa0ad-kube-api-access-cdkl2\") pod \"nova-cell0-cell-mapping-dcfr8\" (UID: \"b6e7005d-f57b-4d7c-a421-a259700fa0ad\") " pod="openstack/nova-cell0-cell-mapping-dcfr8" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.522422 4706 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6e7005d-f57b-4d7c-a421-a259700fa0ad-scripts\") pod \"nova-cell0-cell-mapping-dcfr8\" (UID: \"b6e7005d-f57b-4d7c-a421-a259700fa0ad\") " pod="openstack/nova-cell0-cell-mapping-dcfr8" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.522446 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6e7005d-f57b-4d7c-a421-a259700fa0ad-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-dcfr8\" (UID: \"b6e7005d-f57b-4d7c-a421-a259700fa0ad\") " pod="openstack/nova-cell0-cell-mapping-dcfr8" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.522509 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dcecbbd-fabb-49a7-991c-073c0f1734cf-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5dcecbbd-fabb-49a7-991c-073c0f1734cf\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.522545 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p84cv\" (UniqueName: \"kubernetes.io/projected/718deb27-0684-4cb2-8d09-60b73d7ed6d8-kube-api-access-p84cv\") pod \"nova-scheduler-0\" (UID: \"718deb27-0684-4cb2-8d09-60b73d7ed6d8\") " pod="openstack/nova-scheduler-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.541554 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6e7005d-f57b-4d7c-a421-a259700fa0ad-scripts\") pod \"nova-cell0-cell-mapping-dcfr8\" (UID: \"b6e7005d-f57b-4d7c-a421-a259700fa0ad\") " pod="openstack/nova-cell0-cell-mapping-dcfr8" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.542415 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6e7005d-f57b-4d7c-a421-a259700fa0ad-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-dcfr8\" (UID: \"b6e7005d-f57b-4d7c-a421-a259700fa0ad\") " pod="openstack/nova-cell0-cell-mapping-dcfr8" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.543427 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718deb27-0684-4cb2-8d09-60b73d7ed6d8-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"718deb27-0684-4cb2-8d09-60b73d7ed6d8\") " pod="openstack/nova-scheduler-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.544338 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/718deb27-0684-4cb2-8d09-60b73d7ed6d8-config-data\") pod \"nova-scheduler-0\" (UID: \"718deb27-0684-4cb2-8d09-60b73d7ed6d8\") " pod="openstack/nova-scheduler-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.555380 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p84cv\" (UniqueName: \"kubernetes.io/projected/718deb27-0684-4cb2-8d09-60b73d7ed6d8-kube-api-access-p84cv\") pod \"nova-scheduler-0\" (UID: \"718deb27-0684-4cb2-8d09-60b73d7ed6d8\") " pod="openstack/nova-scheduler-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.561441 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cdkl2\" (UniqueName: 
\"kubernetes.io/projected/b6e7005d-f57b-4d7c-a421-a259700fa0ad-kube-api-access-cdkl2\") pod \"nova-cell0-cell-mapping-dcfr8\" (UID: \"b6e7005d-f57b-4d7c-a421-a259700fa0ad\") " pod="openstack/nova-cell0-cell-mapping-dcfr8" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.561525 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.567683 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6e7005d-f57b-4d7c-a421-a259700fa0ad-config-data\") pod \"nova-cell0-cell-mapping-dcfr8\" (UID: \"b6e7005d-f57b-4d7c-a421-a259700fa0ad\") " pod="openstack/nova-cell0-cell-mapping-dcfr8" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.571696 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.578417 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.594706 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-dcfr8" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.596852 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.604902 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.623525 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.624287 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7d1ef50a-124a-4647-96fb-42f625df6099-logs\") pod \"nova-metadata-0\" (UID: \"7d1ef50a-124a-4647-96fb-42f625df6099\") " pod="openstack/nova-metadata-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.624329 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xls5p\" (UniqueName: \"kubernetes.io/projected/7d1ef50a-124a-4647-96fb-42f625df6099-kube-api-access-xls5p\") pod \"nova-metadata-0\" (UID: \"7d1ef50a-124a-4647-96fb-42f625df6099\") " pod="openstack/nova-metadata-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.624392 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dcecbbd-fabb-49a7-991c-073c0f1734cf-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5dcecbbd-fabb-49a7-991c-073c0f1734cf\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.624419 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7e69fe9-fb4e-412f-87c6-a71da44e6756-logs\") pod \"nova-api-0\" (UID: \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\") " pod="openstack/nova-api-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.624466 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dcecbbd-fabb-49a7-991c-073c0f1734cf-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: 
\"5dcecbbd-fabb-49a7-991c-073c0f1734cf\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.624499 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d1ef50a-124a-4647-96fb-42f625df6099-config-data\") pod \"nova-metadata-0\" (UID: \"7d1ef50a-124a-4647-96fb-42f625df6099\") " pod="openstack/nova-metadata-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.624560 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7e69fe9-fb4e-412f-87c6-a71da44e6756-config-data\") pod \"nova-api-0\" (UID: \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\") " pod="openstack/nova-api-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.624588 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7e69fe9-fb4e-412f-87c6-a71da44e6756-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\") " pod="openstack/nova-api-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.624626 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gr9n8\" (UniqueName: \"kubernetes.io/projected/a7e69fe9-fb4e-412f-87c6-a71da44e6756-kube-api-access-gr9n8\") pod \"nova-api-0\" (UID: \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\") " pod="openstack/nova-api-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.624652 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2992k\" (UniqueName: \"kubernetes.io/projected/5dcecbbd-fabb-49a7-991c-073c0f1734cf-kube-api-access-2992k\") pod \"nova-cell1-novncproxy-0\" (UID: \"5dcecbbd-fabb-49a7-991c-073c0f1734cf\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.624678 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d1ef50a-124a-4647-96fb-42f625df6099-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7d1ef50a-124a-4647-96fb-42f625df6099\") " pod="openstack/nova-metadata-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.629848 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.651393 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.651849 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dcecbbd-fabb-49a7-991c-073c0f1734cf-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"5dcecbbd-fabb-49a7-991c-073c0f1734cf\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.652013 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dcecbbd-fabb-49a7-991c-073c0f1734cf-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"5dcecbbd-fabb-49a7-991c-073c0f1734cf\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.678692 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-2992k\" (UniqueName: \"kubernetes.io/projected/5dcecbbd-fabb-49a7-991c-073c0f1734cf-kube-api-access-2992k\") pod \"nova-cell1-novncproxy-0\" (UID: \"5dcecbbd-fabb-49a7-991c-073c0f1734cf\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.710472 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-845d6d6f59-6hcmg"] Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.712171 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.724001 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.727230 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7e69fe9-fb4e-412f-87c6-a71da44e6756-config-data\") pod \"nova-api-0\" (UID: \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\") " pod="openstack/nova-api-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.727267 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7e69fe9-fb4e-412f-87c6-a71da44e6756-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\") " pod="openstack/nova-api-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.727296 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gr9n8\" (UniqueName: \"kubernetes.io/projected/a7e69fe9-fb4e-412f-87c6-a71da44e6756-kube-api-access-gr9n8\") pod \"nova-api-0\" (UID: \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\") " pod="openstack/nova-api-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.727320 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d1ef50a-124a-4647-96fb-42f625df6099-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7d1ef50a-124a-4647-96fb-42f625df6099\") " pod="openstack/nova-metadata-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.727364 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7d1ef50a-124a-4647-96fb-42f625df6099-logs\") pod \"nova-metadata-0\" (UID: \"7d1ef50a-124a-4647-96fb-42f625df6099\") " pod="openstack/nova-metadata-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.727386 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xls5p\" (UniqueName: \"kubernetes.io/projected/7d1ef50a-124a-4647-96fb-42f625df6099-kube-api-access-xls5p\") pod \"nova-metadata-0\" (UID: \"7d1ef50a-124a-4647-96fb-42f625df6099\") " pod="openstack/nova-metadata-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.727455 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7e69fe9-fb4e-412f-87c6-a71da44e6756-logs\") pod \"nova-api-0\" (UID: \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\") " pod="openstack/nova-api-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.727498 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d1ef50a-124a-4647-96fb-42f625df6099-config-data\") pod \"nova-metadata-0\" (UID: 
\"7d1ef50a-124a-4647-96fb-42f625df6099\") " pod="openstack/nova-metadata-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.731214 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7e69fe9-fb4e-412f-87c6-a71da44e6756-logs\") pod \"nova-api-0\" (UID: \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\") " pod="openstack/nova-api-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.731589 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7d1ef50a-124a-4647-96fb-42f625df6099-logs\") pod \"nova-metadata-0\" (UID: \"7d1ef50a-124a-4647-96fb-42f625df6099\") " pod="openstack/nova-metadata-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.738709 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7e69fe9-fb4e-412f-87c6-a71da44e6756-config-data\") pod \"nova-api-0\" (UID: \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\") " pod="openstack/nova-api-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.738789 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-845d6d6f59-6hcmg"] Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.744885 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d1ef50a-124a-4647-96fb-42f625df6099-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7d1ef50a-124a-4647-96fb-42f625df6099\") " pod="openstack/nova-metadata-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.747167 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7e69fe9-fb4e-412f-87c6-a71da44e6756-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\") " pod="openstack/nova-api-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.751627 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xls5p\" (UniqueName: \"kubernetes.io/projected/7d1ef50a-124a-4647-96fb-42f625df6099-kube-api-access-xls5p\") pod \"nova-metadata-0\" (UID: \"7d1ef50a-124a-4647-96fb-42f625df6099\") " pod="openstack/nova-metadata-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.755786 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d1ef50a-124a-4647-96fb-42f625df6099-config-data\") pod \"nova-metadata-0\" (UID: \"7d1ef50a-124a-4647-96fb-42f625df6099\") " pod="openstack/nova-metadata-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.757366 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gr9n8\" (UniqueName: \"kubernetes.io/projected/a7e69fe9-fb4e-412f-87c6-a71da44e6756-kube-api-access-gr9n8\") pod \"nova-api-0\" (UID: \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\") " pod="openstack/nova-api-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.777675 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.798750 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.828671 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-ovsdbserver-sb\") pod \"dnsmasq-dns-845d6d6f59-6hcmg\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") " pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.828724 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-ovsdbserver-nb\") pod \"dnsmasq-dns-845d6d6f59-6hcmg\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") " pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.829228 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-dns-svc\") pod \"dnsmasq-dns-845d6d6f59-6hcmg\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") " pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.829273 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hk95q\" (UniqueName: \"kubernetes.io/projected/83c5c36d-bae3-4ca2-a542-7223116168e1-kube-api-access-hk95q\") pod \"dnsmasq-dns-845d6d6f59-6hcmg\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") " pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.829301 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-dns-swift-storage-0\") pod \"dnsmasq-dns-845d6d6f59-6hcmg\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") " pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.829381 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-config\") pod \"dnsmasq-dns-845d6d6f59-6hcmg\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") " pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.844789 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.932757 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-config\") pod \"dnsmasq-dns-845d6d6f59-6hcmg\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") " pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.932824 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-ovsdbserver-sb\") pod \"dnsmasq-dns-845d6d6f59-6hcmg\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") " pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.932849 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-ovsdbserver-nb\") pod \"dnsmasq-dns-845d6d6f59-6hcmg\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") " pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.932960 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-dns-svc\") pod \"dnsmasq-dns-845d6d6f59-6hcmg\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") " pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.932982 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hk95q\" (UniqueName: \"kubernetes.io/projected/83c5c36d-bae3-4ca2-a542-7223116168e1-kube-api-access-hk95q\") pod \"dnsmasq-dns-845d6d6f59-6hcmg\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") " pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.933001 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-dns-swift-storage-0\") pod \"dnsmasq-dns-845d6d6f59-6hcmg\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") " pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.933847 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-dns-swift-storage-0\") pod \"dnsmasq-dns-845d6d6f59-6hcmg\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") " pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.934513 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-config\") pod \"dnsmasq-dns-845d6d6f59-6hcmg\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") " pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.934874 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-ovsdbserver-nb\") pod \"dnsmasq-dns-845d6d6f59-6hcmg\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") " pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:04 crc 
kubenswrapper[4706]: I1206 05:49:04.935348 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-dns-svc\") pod \"dnsmasq-dns-845d6d6f59-6hcmg\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") " pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.935418 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-ovsdbserver-sb\") pod \"dnsmasq-dns-845d6d6f59-6hcmg\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") " pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:04 crc kubenswrapper[4706]: I1206 05:49:04.960328 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hk95q\" (UniqueName: \"kubernetes.io/projected/83c5c36d-bae3-4ca2-a542-7223116168e1-kube-api-access-hk95q\") pod \"dnsmasq-dns-845d6d6f59-6hcmg\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") " pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.166836 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.173982 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-dcfr8"] Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.326860 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rxnl6"] Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.328713 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-rxnl6" Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.332631 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.332652 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.347740 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rxnl6"] Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.364462 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff26c6d0-68cb-4541-b647-3a0b244db53c-scripts\") pod \"nova-cell1-conductor-db-sync-rxnl6\" (UID: \"ff26c6d0-68cb-4541-b647-3a0b244db53c\") " pod="openstack/nova-cell1-conductor-db-sync-rxnl6" Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.364520 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff26c6d0-68cb-4541-b647-3a0b244db53c-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-rxnl6\" (UID: \"ff26c6d0-68cb-4541-b647-3a0b244db53c\") " pod="openstack/nova-cell1-conductor-db-sync-rxnl6" Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.365089 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wvsh\" (UniqueName: \"kubernetes.io/projected/ff26c6d0-68cb-4541-b647-3a0b244db53c-kube-api-access-5wvsh\") pod \"nova-cell1-conductor-db-sync-rxnl6\" (UID: 
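The volume flow above runs operationExecutor.VerifyControllerAttachedVolume, then MountVolume started, then MountVolume.SetUp succeeded, once per volume of a pod. A Go sketch (an assumption: this same log fed on stdin, not kubelet code) that groups the successful mounts per pod so the sequence can be audited at a glance:

```go
// Sketch: collect "MountVolume.SetUp succeeded" events per pod. Note the
// log stores literal \" escapes, so the pattern matches backslash-quote.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

var setupOK = regexp.MustCompile(
	`MountVolume\.SetUp succeeded for volume \\"([^"\\]+)\\".*pod="([^"]+)"`)

func main() {
	byPod := map[string][]string{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // log lines are long
	for sc.Scan() {
		if m := setupOK.FindStringSubmatch(sc.Text()); m != nil {
			byPod[m[2]] = append(byPod[m[2]], m[1])
		}
	}
	for pod, vols := range byPod {
		fmt.Println(pod, vols)
	}
}
```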
\"ff26c6d0-68cb-4541-b647-3a0b244db53c\") " pod="openstack/nova-cell1-conductor-db-sync-rxnl6" Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.365204 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff26c6d0-68cb-4541-b647-3a0b244db53c-config-data\") pod \"nova-cell1-conductor-db-sync-rxnl6\" (UID: \"ff26c6d0-68cb-4541-b647-3a0b244db53c\") " pod="openstack/nova-cell1-conductor-db-sync-rxnl6" Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.393834 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.466957 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff26c6d0-68cb-4541-b647-3a0b244db53c-config-data\") pod \"nova-cell1-conductor-db-sync-rxnl6\" (UID: \"ff26c6d0-68cb-4541-b647-3a0b244db53c\") " pod="openstack/nova-cell1-conductor-db-sync-rxnl6" Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.467096 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff26c6d0-68cb-4541-b647-3a0b244db53c-scripts\") pod \"nova-cell1-conductor-db-sync-rxnl6\" (UID: \"ff26c6d0-68cb-4541-b647-3a0b244db53c\") " pod="openstack/nova-cell1-conductor-db-sync-rxnl6" Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.467430 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff26c6d0-68cb-4541-b647-3a0b244db53c-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-rxnl6\" (UID: \"ff26c6d0-68cb-4541-b647-3a0b244db53c\") " pod="openstack/nova-cell1-conductor-db-sync-rxnl6" Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.467489 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wvsh\" (UniqueName: \"kubernetes.io/projected/ff26c6d0-68cb-4541-b647-3a0b244db53c-kube-api-access-5wvsh\") pod \"nova-cell1-conductor-db-sync-rxnl6\" (UID: \"ff26c6d0-68cb-4541-b647-3a0b244db53c\") " pod="openstack/nova-cell1-conductor-db-sync-rxnl6" Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.473017 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff26c6d0-68cb-4541-b647-3a0b244db53c-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-rxnl6\" (UID: \"ff26c6d0-68cb-4541-b647-3a0b244db53c\") " pod="openstack/nova-cell1-conductor-db-sync-rxnl6" Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.474109 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff26c6d0-68cb-4541-b647-3a0b244db53c-config-data\") pod \"nova-cell1-conductor-db-sync-rxnl6\" (UID: \"ff26c6d0-68cb-4541-b647-3a0b244db53c\") " pod="openstack/nova-cell1-conductor-db-sync-rxnl6" Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.474597 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff26c6d0-68cb-4541-b647-3a0b244db53c-scripts\") pod \"nova-cell1-conductor-db-sync-rxnl6\" (UID: \"ff26c6d0-68cb-4541-b647-3a0b244db53c\") " pod="openstack/nova-cell1-conductor-db-sync-rxnl6" Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.487307 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-5wvsh\" (UniqueName: \"kubernetes.io/projected/ff26c6d0-68cb-4541-b647-3a0b244db53c-kube-api-access-5wvsh\") pod \"nova-cell1-conductor-db-sync-rxnl6\" (UID: \"ff26c6d0-68cb-4541-b647-3a0b244db53c\") " pod="openstack/nova-cell1-conductor-db-sync-rxnl6" Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.512958 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.532484 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.540255 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.670879 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-rxnl6" Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.723108 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7d1ef50a-124a-4647-96fb-42f625df6099","Type":"ContainerStarted","Data":"2414aa87701ec3c697a52cfd4a62b07e45a65243f2237eca0e177eac9c7f295a"} Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.724593 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-dcfr8" event={"ID":"b6e7005d-f57b-4d7c-a421-a259700fa0ad","Type":"ContainerStarted","Data":"ab5ca92459da1cc6c6a1c005265b25f07940ee3f03808059516f92de97ba1a52"} Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.726030 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"718deb27-0684-4cb2-8d09-60b73d7ed6d8","Type":"ContainerStarted","Data":"6e109482e721b18bcdb76e08618b5e77cd09d88fd35a97c168df8bbdca343dc7"} Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.727215 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5dcecbbd-fabb-49a7-991c-073c0f1734cf","Type":"ContainerStarted","Data":"e46903b4dfeeecb719919c5e776a1b63cd6cd9cafa5bdf0614fe2ccdad771add"} Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.728078 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a7e69fe9-fb4e-412f-87c6-a71da44e6756","Type":"ContainerStarted","Data":"b252ef544bbe3992498504e6f8295f0e5b800024b0159ca1169a919232a1d973"} Dec 06 05:49:05 crc kubenswrapper[4706]: I1206 05:49:05.815719 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-845d6d6f59-6hcmg"] Dec 06 05:49:06 crc kubenswrapper[4706]: I1206 05:49:06.146294 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rxnl6"] Dec 06 05:49:06 crc kubenswrapper[4706]: W1206 05:49:06.146713 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podff26c6d0_68cb_4541_b647_3a0b244db53c.slice/crio-ee2c828d2c9746fa5357a2928d8b65728a523620cb4b125fcac0371e33ba9ad7 WatchSource:0}: Error finding container ee2c828d2c9746fa5357a2928d8b65728a523620cb4b125fcac0371e33ba9ad7: Status 404 returned error can't find the container with id ee2c828d2c9746fa5357a2928d8b65728a523620cb4b125fcac0371e33ba9ad7 Dec 06 05:49:06 crc kubenswrapper[4706]: I1206 05:49:06.740815 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-dcfr8" 
event={"ID":"b6e7005d-f57b-4d7c-a421-a259700fa0ad","Type":"ContainerStarted","Data":"653bcd3b827623823de59d10cf25ee9c8e417c5ea44bad8f8e3d0b03e626d53e"} Dec 06 05:49:06 crc kubenswrapper[4706]: I1206 05:49:06.742909 4706 generic.go:334] "Generic (PLEG): container finished" podID="83c5c36d-bae3-4ca2-a542-7223116168e1" containerID="a86dffa37d88c2ffe5dd4f730bf95faeecc0edb03fc68c3f2508c432234bbbcf" exitCode=0 Dec 06 05:49:06 crc kubenswrapper[4706]: I1206 05:49:06.742973 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" event={"ID":"83c5c36d-bae3-4ca2-a542-7223116168e1","Type":"ContainerDied","Data":"a86dffa37d88c2ffe5dd4f730bf95faeecc0edb03fc68c3f2508c432234bbbcf"} Dec 06 05:49:06 crc kubenswrapper[4706]: I1206 05:49:06.743005 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" event={"ID":"83c5c36d-bae3-4ca2-a542-7223116168e1","Type":"ContainerStarted","Data":"60fc26eaf45c856f8c91a3934a616fc127cadcc07c428b8e7d38fabe6e54fd01"} Dec 06 05:49:06 crc kubenswrapper[4706]: I1206 05:49:06.747604 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-rxnl6" event={"ID":"ff26c6d0-68cb-4541-b647-3a0b244db53c","Type":"ContainerStarted","Data":"6821d12e2756ebfcf7ceff68b64c07924df203a6b5125fd4ed2e902ddec8a81d"} Dec 06 05:49:06 crc kubenswrapper[4706]: I1206 05:49:06.747667 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-rxnl6" event={"ID":"ff26c6d0-68cb-4541-b647-3a0b244db53c","Type":"ContainerStarted","Data":"ee2c828d2c9746fa5357a2928d8b65728a523620cb4b125fcac0371e33ba9ad7"} Dec 06 05:49:06 crc kubenswrapper[4706]: I1206 05:49:06.762868 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-dcfr8" podStartSLOduration=2.762851139 podStartE2EDuration="2.762851139s" podCreationTimestamp="2025-12-06 05:49:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:49:06.759034895 +0000 UTC m=+1769.086858849" watchObservedRunningTime="2025-12-06 05:49:06.762851139 +0000 UTC m=+1769.090675083" Dec 06 05:49:06 crc kubenswrapper[4706]: I1206 05:49:06.836905 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-rxnl6" podStartSLOduration=1.8368835780000001 podStartE2EDuration="1.836883578s" podCreationTimestamp="2025-12-06 05:49:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:49:06.824211816 +0000 UTC m=+1769.152035760" watchObservedRunningTime="2025-12-06 05:49:06.836883578 +0000 UTC m=+1769.164707532" Dec 06 05:49:07 crc kubenswrapper[4706]: I1206 05:49:07.760664 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" event={"ID":"83c5c36d-bae3-4ca2-a542-7223116168e1","Type":"ContainerStarted","Data":"d5e3999ecbcbc76576ace37635d7636fbed71940758ab9eddf95651b344f28f1"} Dec 06 05:49:07 crc kubenswrapper[4706]: I1206 05:49:07.761143 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:07 crc kubenswrapper[4706]: I1206 05:49:07.787757 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" podStartSLOduration=3.787739345 
podStartE2EDuration="3.787739345s" podCreationTimestamp="2025-12-06 05:49:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:49:07.780240792 +0000 UTC m=+1770.108064746" watchObservedRunningTime="2025-12-06 05:49:07.787739345 +0000 UTC m=+1770.115563289" Dec 06 05:49:09 crc kubenswrapper[4706]: I1206 05:49:09.346282 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 06 05:49:09 crc kubenswrapper[4706]: I1206 05:49:09.354240 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 06 05:49:09 crc kubenswrapper[4706]: I1206 05:49:09.514066 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 06 05:49:09 crc kubenswrapper[4706]: I1206 05:49:09.514662 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="32917516-145f-4318-a824-43d2fd3b5d85" containerName="kube-state-metrics" containerID="cri-o://1ae8c36354b8ded090cf54138a5767d9379f8ad8b265af89be3934ed91c072db" gracePeriod=30 Dec 06 05:49:09 crc kubenswrapper[4706]: I1206 05:49:09.779355 4706 generic.go:334] "Generic (PLEG): container finished" podID="32917516-145f-4318-a824-43d2fd3b5d85" containerID="1ae8c36354b8ded090cf54138a5767d9379f8ad8b265af89be3934ed91c072db" exitCode=2 Dec 06 05:49:09 crc kubenswrapper[4706]: I1206 05:49:09.779405 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"32917516-145f-4318-a824-43d2fd3b5d85","Type":"ContainerDied","Data":"1ae8c36354b8ded090cf54138a5767d9379f8ad8b265af89be3934ed91c072db"} Dec 06 05:49:10 crc kubenswrapper[4706]: I1206 05:49:10.036774 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:49:10 crc kubenswrapper[4706]: E1206 05:49:10.037069 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.078791 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.227951 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkm9n\" (UniqueName: \"kubernetes.io/projected/32917516-145f-4318-a824-43d2fd3b5d85-kube-api-access-zkm9n\") pod \"32917516-145f-4318-a824-43d2fd3b5d85\" (UID: \"32917516-145f-4318-a824-43d2fd3b5d85\") " Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.239525 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32917516-145f-4318-a824-43d2fd3b5d85-kube-api-access-zkm9n" (OuterVolumeSpecName: "kube-api-access-zkm9n") pod "32917516-145f-4318-a824-43d2fd3b5d85" (UID: "32917516-145f-4318-a824-43d2fd3b5d85"). InnerVolumeSpecName "kube-api-access-zkm9n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.330520 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkm9n\" (UniqueName: \"kubernetes.io/projected/32917516-145f-4318-a824-43d2fd3b5d85-kube-api-access-zkm9n\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.462829 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.463394 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerName="ceilometer-central-agent" containerID="cri-o://475bda868e17a0e6b46d62d8c1944b4bdee84c33f4c517a7be11e819b561b717" gracePeriod=30 Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.463470 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerName="proxy-httpd" containerID="cri-o://73e5e3669e46a8ffa63177845a0d9a370cc014f56dd047af60fdcb1b35583456" gracePeriod=30 Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.463495 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerName="ceilometer-notification-agent" containerID="cri-o://96efc7cf41accb1ec53c024abfd3315d8d301f5c71385799d22b1cca9217cfd0" gracePeriod=30 Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.463545 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerName="sg-core" containerID="cri-o://011612a1717aab271f875fa95d5b5e62ab4c13f2052a112de429132626a14e4b" gracePeriod=30 Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.798164 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"32917516-145f-4318-a824-43d2fd3b5d85","Type":"ContainerDied","Data":"45bf7a73be46738e918eb53274ac11999f2bbe1783227c8b4a95f9b96fbcee12"} Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.798659 4706 scope.go:117] "RemoveContainer" containerID="1ae8c36354b8ded090cf54138a5767d9379f8ad8b265af89be3934ed91c072db" Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.798874 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.839465 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.847755 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.857137 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Dec 06 05:49:11 crc kubenswrapper[4706]: E1206 05:49:11.858066 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32917516-145f-4318-a824-43d2fd3b5d85" containerName="kube-state-metrics" Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.858156 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="32917516-145f-4318-a824-43d2fd3b5d85" containerName="kube-state-metrics" Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.859091 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="32917516-145f-4318-a824-43d2fd3b5d85" containerName="kube-state-metrics" Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.860015 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.865809 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.868340 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Dec 06 05:49:11 crc kubenswrapper[4706]: I1206 05:49:11.869653 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.043859 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/66632781-9905-4f3f-8945-92ca177cf2bc-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"66632781-9905-4f3f-8945-92ca177cf2bc\") " pod="openstack/kube-state-metrics-0" Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.043939 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pls57\" (UniqueName: \"kubernetes.io/projected/66632781-9905-4f3f-8945-92ca177cf2bc-kube-api-access-pls57\") pod \"kube-state-metrics-0\" (UID: \"66632781-9905-4f3f-8945-92ca177cf2bc\") " pod="openstack/kube-state-metrics-0" Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.043967 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/66632781-9905-4f3f-8945-92ca177cf2bc-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"66632781-9905-4f3f-8945-92ca177cf2bc\") " pod="openstack/kube-state-metrics-0" Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.043997 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66632781-9905-4f3f-8945-92ca177cf2bc-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"66632781-9905-4f3f-8945-92ca177cf2bc\") " pod="openstack/kube-state-metrics-0" Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.051215 4706 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="32917516-145f-4318-a824-43d2fd3b5d85" path="/var/lib/kubelet/pods/32917516-145f-4318-a824-43d2fd3b5d85/volumes" Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.146248 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/66632781-9905-4f3f-8945-92ca177cf2bc-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"66632781-9905-4f3f-8945-92ca177cf2bc\") " pod="openstack/kube-state-metrics-0" Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.146407 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pls57\" (UniqueName: \"kubernetes.io/projected/66632781-9905-4f3f-8945-92ca177cf2bc-kube-api-access-pls57\") pod \"kube-state-metrics-0\" (UID: \"66632781-9905-4f3f-8945-92ca177cf2bc\") " pod="openstack/kube-state-metrics-0" Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.146444 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/66632781-9905-4f3f-8945-92ca177cf2bc-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"66632781-9905-4f3f-8945-92ca177cf2bc\") " pod="openstack/kube-state-metrics-0" Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.146480 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66632781-9905-4f3f-8945-92ca177cf2bc-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"66632781-9905-4f3f-8945-92ca177cf2bc\") " pod="openstack/kube-state-metrics-0" Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.151553 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/66632781-9905-4f3f-8945-92ca177cf2bc-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"66632781-9905-4f3f-8945-92ca177cf2bc\") " pod="openstack/kube-state-metrics-0" Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.152650 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/66632781-9905-4f3f-8945-92ca177cf2bc-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"66632781-9905-4f3f-8945-92ca177cf2bc\") " pod="openstack/kube-state-metrics-0" Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.153722 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66632781-9905-4f3f-8945-92ca177cf2bc-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"66632781-9905-4f3f-8945-92ca177cf2bc\") " pod="openstack/kube-state-metrics-0" Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.162900 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pls57\" (UniqueName: \"kubernetes.io/projected/66632781-9905-4f3f-8945-92ca177cf2bc-kube-api-access-pls57\") pod \"kube-state-metrics-0\" (UID: \"66632781-9905-4f3f-8945-92ca177cf2bc\") " pod="openstack/kube-state-metrics-0" Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.191943 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.808686 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"718deb27-0684-4cb2-8d09-60b73d7ed6d8","Type":"ContainerStarted","Data":"cb3931e194573f40dd7612115f12a8f349de48732cbc39e63a5f27b68bc8f8a0"} Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.810276 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="5dcecbbd-fabb-49a7-991c-073c0f1734cf" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://6eb7b5f5b433b36fbee084b102b39c645d3839994ca9d16aab07b400f4ea2218" gracePeriod=30 Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.810280 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5dcecbbd-fabb-49a7-991c-073c0f1734cf","Type":"ContainerStarted","Data":"6eb7b5f5b433b36fbee084b102b39c645d3839994ca9d16aab07b400f4ea2218"} Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.813033 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a7e69fe9-fb4e-412f-87c6-a71da44e6756","Type":"ContainerStarted","Data":"002268c3c64876de52543f6c3f8fba92547392e5d8db42c410e83a7dd05464ff"} Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.813080 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a7e69fe9-fb4e-412f-87c6-a71da44e6756","Type":"ContainerStarted","Data":"bcd2fdb69a82447d3f4f66cd605572d2638fad9888feafba95ae6ca09f5fd711"} Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.816332 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7d1ef50a-124a-4647-96fb-42f625df6099","Type":"ContainerStarted","Data":"ed380301caaf69c9aae3c5438ada13bc88eba088ba60bbac53917756b61c7cc4"} Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.816365 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7d1ef50a-124a-4647-96fb-42f625df6099","Type":"ContainerStarted","Data":"fd3603e4e0a64d881904389c9b2ad86c9c266878c57a1d6ec6aae91d7633a746"} Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.816428 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="7d1ef50a-124a-4647-96fb-42f625df6099" containerName="nova-metadata-metadata" containerID="cri-o://ed380301caaf69c9aae3c5438ada13bc88eba088ba60bbac53917756b61c7cc4" gracePeriod=30 Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.816434 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="7d1ef50a-124a-4647-96fb-42f625df6099" containerName="nova-metadata-log" containerID="cri-o://fd3603e4e0a64d881904389c9b2ad86c9c266878c57a1d6ec6aae91d7633a746" gracePeriod=30 Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.824422 4706 generic.go:334] "Generic (PLEG): container finished" podID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerID="73e5e3669e46a8ffa63177845a0d9a370cc014f56dd047af60fdcb1b35583456" exitCode=0 Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.824451 4706 generic.go:334] "Generic (PLEG): container finished" podID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerID="011612a1717aab271f875fa95d5b5e62ab4c13f2052a112de429132626a14e4b" exitCode=2 Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.824458 4706 generic.go:334] 
"Generic (PLEG): container finished" podID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerID="475bda868e17a0e6b46d62d8c1944b4bdee84c33f4c517a7be11e819b561b717" exitCode=0 Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.824481 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cc02a95-a453-4485-a066-9fe2ecfcc3c5","Type":"ContainerDied","Data":"73e5e3669e46a8ffa63177845a0d9a370cc014f56dd047af60fdcb1b35583456"} Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.824505 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cc02a95-a453-4485-a066-9fe2ecfcc3c5","Type":"ContainerDied","Data":"011612a1717aab271f875fa95d5b5e62ab4c13f2052a112de429132626a14e4b"} Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.824515 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cc02a95-a453-4485-a066-9fe2ecfcc3c5","Type":"ContainerDied","Data":"475bda868e17a0e6b46d62d8c1944b4bdee84c33f4c517a7be11e819b561b717"} Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.829615 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.829822 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.406172907 podStartE2EDuration="8.829806959s" podCreationTimestamp="2025-12-06 05:49:04 +0000 UTC" firstStartedPulling="2025-12-06 05:49:05.404250328 +0000 UTC m=+1767.732074272" lastFinishedPulling="2025-12-06 05:49:10.82788438 +0000 UTC m=+1773.155708324" observedRunningTime="2025-12-06 05:49:12.827504008 +0000 UTC m=+1775.155327952" watchObservedRunningTime="2025-12-06 05:49:12.829806959 +0000 UTC m=+1775.157630903" Dec 06 05:49:12 crc kubenswrapper[4706]: W1206 05:49:12.839971 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod66632781_9905_4f3f_8945_92ca177cf2bc.slice/crio-f7dda741e489072f34c5012fe5db9e760220ca4fd028a7f62a566d1fdbfebdaf WatchSource:0}: Error finding container f7dda741e489072f34c5012fe5db9e760220ca4fd028a7f62a566d1fdbfebdaf: Status 404 returned error can't find the container with id f7dda741e489072f34c5012fe5db9e760220ca4fd028a7f62a566d1fdbfebdaf Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.863818 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.5859319640000002 podStartE2EDuration="8.863800889s" podCreationTimestamp="2025-12-06 05:49:04 +0000 UTC" firstStartedPulling="2025-12-06 05:49:05.554110246 +0000 UTC m=+1767.881934190" lastFinishedPulling="2025-12-06 05:49:10.831979171 +0000 UTC m=+1773.159803115" observedRunningTime="2025-12-06 05:49:12.860581351 +0000 UTC m=+1775.188405295" watchObservedRunningTime="2025-12-06 05:49:12.863800889 +0000 UTC m=+1775.191624843" Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.892278 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.567937386 podStartE2EDuration="8.892256867s" podCreationTimestamp="2025-12-06 05:49:04 +0000 UTC" firstStartedPulling="2025-12-06 05:49:05.509425978 +0000 UTC m=+1767.837249922" lastFinishedPulling="2025-12-06 05:49:10.833745459 +0000 UTC m=+1773.161569403" observedRunningTime="2025-12-06 05:49:12.877801546 +0000 UTC m=+1775.205625490" 
watchObservedRunningTime="2025-12-06 05:49:12.892256867 +0000 UTC m=+1775.220080821" Dec 06 05:49:12 crc kubenswrapper[4706]: I1206 05:49:12.900385 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.612820299 podStartE2EDuration="8.900348236s" podCreationTimestamp="2025-12-06 05:49:04 +0000 UTC" firstStartedPulling="2025-12-06 05:49:05.548165365 +0000 UTC m=+1767.875989309" lastFinishedPulling="2025-12-06 05:49:10.835693292 +0000 UTC m=+1773.163517246" observedRunningTime="2025-12-06 05:49:12.899951624 +0000 UTC m=+1775.227775578" watchObservedRunningTime="2025-12-06 05:49:12.900348236 +0000 UTC m=+1775.228172180" Dec 06 05:49:13 crc kubenswrapper[4706]: I1206 05:49:13.834580 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"66632781-9905-4f3f-8945-92ca177cf2bc","Type":"ContainerStarted","Data":"28f7e7ba2bc6ad8df2a13c5d7c1e88896aeaad0d8a8f8b8484d69f0d4a6d3107"} Dec 06 05:49:13 crc kubenswrapper[4706]: I1206 05:49:13.834854 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"66632781-9905-4f3f-8945-92ca177cf2bc","Type":"ContainerStarted","Data":"f7dda741e489072f34c5012fe5db9e760220ca4fd028a7f62a566d1fdbfebdaf"} Dec 06 05:49:13 crc kubenswrapper[4706]: I1206 05:49:13.834874 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Dec 06 05:49:13 crc kubenswrapper[4706]: I1206 05:49:13.838084 4706 generic.go:334] "Generic (PLEG): container finished" podID="7d1ef50a-124a-4647-96fb-42f625df6099" containerID="fd3603e4e0a64d881904389c9b2ad86c9c266878c57a1d6ec6aae91d7633a746" exitCode=143 Dec 06 05:49:13 crc kubenswrapper[4706]: I1206 05:49:13.838136 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7d1ef50a-124a-4647-96fb-42f625df6099","Type":"ContainerDied","Data":"fd3603e4e0a64d881904389c9b2ad86c9c266878c57a1d6ec6aae91d7633a746"} Dec 06 05:49:13 crc kubenswrapper[4706]: I1206 05:49:13.840539 4706 generic.go:334] "Generic (PLEG): container finished" podID="b6e7005d-f57b-4d7c-a421-a259700fa0ad" containerID="653bcd3b827623823de59d10cf25ee9c8e417c5ea44bad8f8e3d0b03e626d53e" exitCode=0 Dec 06 05:49:13 crc kubenswrapper[4706]: I1206 05:49:13.840601 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-dcfr8" event={"ID":"b6e7005d-f57b-4d7c-a421-a259700fa0ad","Type":"ContainerDied","Data":"653bcd3b827623823de59d10cf25ee9c8e417c5ea44bad8f8e3d0b03e626d53e"} Dec 06 05:49:13 crc kubenswrapper[4706]: I1206 05:49:13.857484 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.493046356 podStartE2EDuration="2.85746676s" podCreationTimestamp="2025-12-06 05:49:11 +0000 UTC" firstStartedPulling="2025-12-06 05:49:12.842646527 +0000 UTC m=+1775.170470471" lastFinishedPulling="2025-12-06 05:49:13.207066931 +0000 UTC m=+1775.534890875" observedRunningTime="2025-12-06 05:49:13.851256153 +0000 UTC m=+1776.179080107" watchObservedRunningTime="2025-12-06 05:49:13.85746676 +0000 UTC m=+1776.185290704" Dec 06 05:49:14 crc kubenswrapper[4706]: I1206 05:49:14.725677 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 06 05:49:14 crc kubenswrapper[4706]: I1206 05:49:14.726196 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" 
Dec 06 05:49:14 crc kubenswrapper[4706]: I1206 05:49:14.775707 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Dec 06 05:49:14 crc kubenswrapper[4706]: I1206 05:49:14.778483 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Dec 06 05:49:14 crc kubenswrapper[4706]: I1206 05:49:14.799743 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Dec 06 05:49:14 crc kubenswrapper[4706]: I1206 05:49:14.799799 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Dec 06 05:49:14 crc kubenswrapper[4706]: I1206 05:49:14.847369 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Dec 06 05:49:14 crc kubenswrapper[4706]: I1206 05:49:14.847415 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Dec 06 05:49:14 crc kubenswrapper[4706]: I1206 05:49:14.889357 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Dec 06 05:49:15 crc kubenswrapper[4706]: I1206 05:49:15.171241 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg"
Dec 06 05:49:15 crc kubenswrapper[4706]: I1206 05:49:15.236246 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-z825x"]
Dec 06 05:49:15 crc kubenswrapper[4706]: I1206 05:49:15.236485 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5784cf869f-z825x" podUID="709e6d78-3db4-4779-a1ad-4c7eda89838d" containerName="dnsmasq-dns" containerID="cri-o://19d4f5cba503e86f785f6ea782d6767afea8fa684b7ed675ae51ebd06ff23612" gracePeriod=10
Dec 06 05:49:15 crc kubenswrapper[4706]: I1206 05:49:15.538969 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5784cf869f-z825x" podUID="709e6d78-3db4-4779-a1ad-4c7eda89838d" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.164:5353: connect: connection refused"
Dec 06 05:49:15 crc kubenswrapper[4706]: I1206 05:49:15.861536 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-dcfr8" event={"ID":"b6e7005d-f57b-4d7c-a421-a259700fa0ad","Type":"ContainerDied","Data":"ab5ca92459da1cc6c6a1c005265b25f07940ee3f03808059516f92de97ba1a52"}
Dec 06 05:49:15 crc kubenswrapper[4706]: I1206 05:49:15.861605 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ab5ca92459da1cc6c6a1c005265b25f07940ee3f03808059516f92de97ba1a52"
Dec 06 05:49:15 crc kubenswrapper[4706]: I1206 05:49:15.871395 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-dcfr8"
Dec 06 05:49:15 crc kubenswrapper[4706]: I1206 05:49:15.883221 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a7e69fe9-fb4e-412f-87c6-a71da44e6756" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.187:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Dec 06 05:49:15 crc kubenswrapper[4706]: I1206 05:49:15.883238 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="a7e69fe9-fb4e-412f-87c6-a71da44e6756" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.187:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:16 crc kubenswrapper[4706]: I1206 05:49:16.062217 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6e7005d-f57b-4d7c-a421-a259700fa0ad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b6e7005d-f57b-4d7c-a421-a259700fa0ad" (UID: "b6e7005d-f57b-4d7c-a421-a259700fa0ad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:16 crc kubenswrapper[4706]: I1206 05:49:16.123079 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b6e7005d-f57b-4d7c-a421-a259700fa0ad-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:16 crc kubenswrapper[4706]: I1206 05:49:16.123113 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cdkl2\" (UniqueName: \"kubernetes.io/projected/b6e7005d-f57b-4d7c-a421-a259700fa0ad-kube-api-access-cdkl2\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:16 crc kubenswrapper[4706]: I1206 05:49:16.123126 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b6e7005d-f57b-4d7c-a421-a259700fa0ad-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:16 crc kubenswrapper[4706]: I1206 05:49:16.123135 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b6e7005d-f57b-4d7c-a421-a259700fa0ad-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:16 crc kubenswrapper[4706]: I1206 05:49:16.873858 4706 generic.go:334] "Generic (PLEG): container finished" podID="709e6d78-3db4-4779-a1ad-4c7eda89838d" containerID="19d4f5cba503e86f785f6ea782d6767afea8fa684b7ed675ae51ebd06ff23612" exitCode=0 Dec 06 05:49:16 crc kubenswrapper[4706]: I1206 05:49:16.873925 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-z825x" event={"ID":"709e6d78-3db4-4779-a1ad-4c7eda89838d","Type":"ContainerDied","Data":"19d4f5cba503e86f785f6ea782d6767afea8fa684b7ed675ae51ebd06ff23612"} Dec 06 05:49:16 crc kubenswrapper[4706]: I1206 05:49:16.876100 4706 generic.go:334] "Generic (PLEG): container finished" podID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerID="96efc7cf41accb1ec53c024abfd3315d8d301f5c71385799d22b1cca9217cfd0" exitCode=0 Dec 06 05:49:16 crc kubenswrapper[4706]: I1206 05:49:16.877156 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cc02a95-a453-4485-a066-9fe2ecfcc3c5","Type":"ContainerDied","Data":"96efc7cf41accb1ec53c024abfd3315d8d301f5c71385799d22b1cca9217cfd0"} Dec 06 05:49:16 crc kubenswrapper[4706]: I1206 05:49:16.877210 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-dcfr8" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.006501 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.006712 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="a7e69fe9-fb4e-412f-87c6-a71da44e6756" containerName="nova-api-log" containerID="cri-o://bcd2fdb69a82447d3f4f66cd605572d2638fad9888feafba95ae6ca09f5fd711" gracePeriod=30 Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.006835 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="a7e69fe9-fb4e-412f-87c6-a71da44e6756" containerName="nova-api-api" containerID="cri-o://002268c3c64876de52543f6c3f8fba92547392e5d8db42c410e83a7dd05464ff" gracePeriod=30 Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.363931 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.661073 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.669772 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.863762 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-dns-swift-storage-0\") pod \"709e6d78-3db4-4779-a1ad-4c7eda89838d\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.864219 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-log-httpd\") pod \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.864291 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-config\") pod \"709e6d78-3db4-4779-a1ad-4c7eda89838d\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.864393 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-config-data\") pod \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.864417 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-scripts\") pod \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.864487 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqpsg\" (UniqueName: \"kubernetes.io/projected/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-kube-api-access-gqpsg\") pod \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " Dec 06 05:49:17 crc kubenswrapper[4706]: 
I1206 05:49:17.864561 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-combined-ca-bundle\") pod \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.864621 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-dns-svc\") pod \"709e6d78-3db4-4779-a1ad-4c7eda89838d\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.864723 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-run-httpd\") pod \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.864819 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vnbj6\" (UniqueName: \"kubernetes.io/projected/709e6d78-3db4-4779-a1ad-4c7eda89838d-kube-api-access-vnbj6\") pod \"709e6d78-3db4-4779-a1ad-4c7eda89838d\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.864879 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-ovsdbserver-nb\") pod \"709e6d78-3db4-4779-a1ad-4c7eda89838d\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.864887 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "1cc02a95-a453-4485-a066-9fe2ecfcc3c5" (UID: "1cc02a95-a453-4485-a066-9fe2ecfcc3c5"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.864967 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-ovsdbserver-sb\") pod \"709e6d78-3db4-4779-a1ad-4c7eda89838d\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.865016 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-sg-core-conf-yaml\") pod \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\" (UID: \"1cc02a95-a453-4485-a066-9fe2ecfcc3c5\") " Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.865567 4706 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.866476 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "1cc02a95-a453-4485-a066-9fe2ecfcc3c5" (UID: "1cc02a95-a453-4485-a066-9fe2ecfcc3c5"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.870333 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-scripts" (OuterVolumeSpecName: "scripts") pod "1cc02a95-a453-4485-a066-9fe2ecfcc3c5" (UID: "1cc02a95-a453-4485-a066-9fe2ecfcc3c5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.872956 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-kube-api-access-gqpsg" (OuterVolumeSpecName: "kube-api-access-gqpsg") pod "1cc02a95-a453-4485-a066-9fe2ecfcc3c5" (UID: "1cc02a95-a453-4485-a066-9fe2ecfcc3c5"). InnerVolumeSpecName "kube-api-access-gqpsg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.877557 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/709e6d78-3db4-4779-a1ad-4c7eda89838d-kube-api-access-vnbj6" (OuterVolumeSpecName: "kube-api-access-vnbj6") pod "709e6d78-3db4-4779-a1ad-4c7eda89838d" (UID: "709e6d78-3db4-4779-a1ad-4c7eda89838d"). InnerVolumeSpecName "kube-api-access-vnbj6". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.901729 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"1cc02a95-a453-4485-a066-9fe2ecfcc3c5","Type":"ContainerDied","Data":"ef3484541eb3f8107579c1f24c33af9bf12d39defdf96d7bcffb8714fbeb048d"} Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.902066 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.902077 4706 scope.go:117] "RemoveContainer" containerID="73e5e3669e46a8ffa63177845a0d9a370cc014f56dd047af60fdcb1b35583456" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.904378 4706 generic.go:334] "Generic (PLEG): container finished" podID="a7e69fe9-fb4e-412f-87c6-a71da44e6756" containerID="bcd2fdb69a82447d3f4f66cd605572d2638fad9888feafba95ae6ca09f5fd711" exitCode=143 Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.904455 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a7e69fe9-fb4e-412f-87c6-a71da44e6756","Type":"ContainerDied","Data":"bcd2fdb69a82447d3f4f66cd605572d2638fad9888feafba95ae6ca09f5fd711"} Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.906854 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-z825x" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.906865 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-z825x" event={"ID":"709e6d78-3db4-4779-a1ad-4c7eda89838d","Type":"ContainerDied","Data":"58d90addc5c9c3622152629d6a3912fa68a47e7ab91f27254c561fcd305e64b9"} Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.906863 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="718deb27-0684-4cb2-8d09-60b73d7ed6d8" containerName="nova-scheduler-scheduler" containerID="cri-o://cb3931e194573f40dd7612115f12a8f349de48732cbc39e63a5f27b68bc8f8a0" gracePeriod=30 Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.926500 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "709e6d78-3db4-4779-a1ad-4c7eda89838d" (UID: "709e6d78-3db4-4779-a1ad-4c7eda89838d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.929723 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "1cc02a95-a453-4485-a066-9fe2ecfcc3c5" (UID: "1cc02a95-a453-4485-a066-9fe2ecfcc3c5"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.941484 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "709e6d78-3db4-4779-a1ad-4c7eda89838d" (UID: "709e6d78-3db4-4779-a1ad-4c7eda89838d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.948613 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-config" (OuterVolumeSpecName: "config") pod "709e6d78-3db4-4779-a1ad-4c7eda89838d" (UID: "709e6d78-3db4-4779-a1ad-4c7eda89838d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.957135 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "709e6d78-3db4-4779-a1ad-4c7eda89838d" (UID: "709e6d78-3db4-4779-a1ad-4c7eda89838d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.971040 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "709e6d78-3db4-4779-a1ad-4c7eda89838d" (UID: "709e6d78-3db4-4779-a1ad-4c7eda89838d"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.971277 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-dns-svc\") pod \"709e6d78-3db4-4779-a1ad-4c7eda89838d\" (UID: \"709e6d78-3db4-4779-a1ad-4c7eda89838d\") " Dec 06 05:49:17 crc kubenswrapper[4706]: W1206 05:49:17.971962 4706 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/709e6d78-3db4-4779-a1ad-4c7eda89838d/volumes/kubernetes.io~configmap/dns-svc Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.971988 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "709e6d78-3db4-4779-a1ad-4c7eda89838d" (UID: "709e6d78-3db4-4779-a1ad-4c7eda89838d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.972399 4706 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.972431 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.972441 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.972450 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqpsg\" (UniqueName: \"kubernetes.io/projected/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-kube-api-access-gqpsg\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.972460 4706 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.972468 4706 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.972477 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vnbj6\" (UniqueName: \"kubernetes.io/projected/709e6d78-3db4-4779-a1ad-4c7eda89838d-kube-api-access-vnbj6\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.972485 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.972512 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/709e6d78-3db4-4779-a1ad-4c7eda89838d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.972524 4706 reconciler_common.go:293] "Volume 
detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.976081 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-config-data" (OuterVolumeSpecName: "config-data") pod "1cc02a95-a453-4485-a066-9fe2ecfcc3c5" (UID: "1cc02a95-a453-4485-a066-9fe2ecfcc3c5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:17 crc kubenswrapper[4706]: I1206 05:49:17.976428 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1cc02a95-a453-4485-a066-9fe2ecfcc3c5" (UID: "1cc02a95-a453-4485-a066-9fe2ecfcc3c5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.003029 4706 scope.go:117] "RemoveContainer" containerID="011612a1717aab271f875fa95d5b5e62ab4c13f2052a112de429132626a14e4b" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.025214 4706 scope.go:117] "RemoveContainer" containerID="96efc7cf41accb1ec53c024abfd3315d8d301f5c71385799d22b1cca9217cfd0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.047409 4706 scope.go:117] "RemoveContainer" containerID="475bda868e17a0e6b46d62d8c1944b4bdee84c33f4c517a7be11e819b561b717" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.074372 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.074399 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cc02a95-a453-4485-a066-9fe2ecfcc3c5-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.079151 4706 scope.go:117] "RemoveContainer" containerID="19d4f5cba503e86f785f6ea782d6767afea8fa684b7ed675ae51ebd06ff23612" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.099960 4706 scope.go:117] "RemoveContainer" containerID="551ad65c832494f001ee7773a3ef5d48f896bd4ff3cbefbffc0c6f067489305c" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.231177 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.247212 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.256940 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-z825x"] Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.264521 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-z825x"] Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.272634 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:49:18 crc kubenswrapper[4706]: E1206 05:49:18.273156 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerName="ceilometer-notification-agent" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.273171 4706 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerName="ceilometer-notification-agent" Dec 06 05:49:18 crc kubenswrapper[4706]: E1206 05:49:18.273184 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerName="sg-core" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.273191 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerName="sg-core" Dec 06 05:49:18 crc kubenswrapper[4706]: E1206 05:49:18.273212 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6e7005d-f57b-4d7c-a421-a259700fa0ad" containerName="nova-manage" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.273217 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6e7005d-f57b-4d7c-a421-a259700fa0ad" containerName="nova-manage" Dec 06 05:49:18 crc kubenswrapper[4706]: E1206 05:49:18.273226 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="709e6d78-3db4-4779-a1ad-4c7eda89838d" containerName="init" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.273232 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="709e6d78-3db4-4779-a1ad-4c7eda89838d" containerName="init" Dec 06 05:49:18 crc kubenswrapper[4706]: E1206 05:49:18.273253 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerName="ceilometer-central-agent" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.273259 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerName="ceilometer-central-agent" Dec 06 05:49:18 crc kubenswrapper[4706]: E1206 05:49:18.273309 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerName="proxy-httpd" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.273316 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerName="proxy-httpd" Dec 06 05:49:18 crc kubenswrapper[4706]: E1206 05:49:18.273328 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="709e6d78-3db4-4779-a1ad-4c7eda89838d" containerName="dnsmasq-dns" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.273336 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="709e6d78-3db4-4779-a1ad-4c7eda89838d" containerName="dnsmasq-dns" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.273534 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerName="ceilometer-central-agent" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.273545 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerName="sg-core" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.273556 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6e7005d-f57b-4d7c-a421-a259700fa0ad" containerName="nova-manage" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.273575 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerName="ceilometer-notification-agent" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.273585 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" containerName="proxy-httpd" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.273595 4706 
memory_manager.go:354] "RemoveStaleState removing state" podUID="709e6d78-3db4-4779-a1ad-4c7eda89838d" containerName="dnsmasq-dns" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.275758 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.277743 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.279166 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.279363 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.280654 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.379783 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-scripts\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.379827 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-run-httpd\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.379864 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksk9n\" (UniqueName: \"kubernetes.io/projected/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-kube-api-access-ksk9n\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.379909 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-log-httpd\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.379932 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.380072 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-config-data\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.380340 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " 
pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.380400 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.481997 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.482642 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.482699 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-scripts\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.482734 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-run-httpd\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.482791 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ksk9n\" (UniqueName: \"kubernetes.io/projected/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-kube-api-access-ksk9n\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.482824 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-log-httpd\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.482843 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.482883 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-config-data\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.483333 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-run-httpd\") pod \"ceilometer-0\" (UID: 
\"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.483412 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-log-httpd\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.486603 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.488792 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.489119 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-config-data\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.489598 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.493643 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-scripts\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.502774 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ksk9n\" (UniqueName: \"kubernetes.io/projected/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-kube-api-access-ksk9n\") pod \"ceilometer-0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " pod="openstack/ceilometer-0" Dec 06 05:49:18 crc kubenswrapper[4706]: I1206 05:49:18.673903 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:49:19 crc kubenswrapper[4706]: W1206 05:49:19.120523 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2849104_5ab6_4332_8bca_dc2f9d0cdec0.slice/crio-ee8b575b3f8ed004dc17c066257a889bfcebeec34e269ecaa022035cc0448065 WatchSource:0}: Error finding container ee8b575b3f8ed004dc17c066257a889bfcebeec34e269ecaa022035cc0448065: Status 404 returned error can't find the container with id ee8b575b3f8ed004dc17c066257a889bfcebeec34e269ecaa022035cc0448065 Dec 06 05:49:19 crc kubenswrapper[4706]: I1206 05:49:19.122885 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:49:19 crc kubenswrapper[4706]: E1206 05:49:19.728416 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cb3931e194573f40dd7612115f12a8f349de48732cbc39e63a5f27b68bc8f8a0" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 06 05:49:19 crc kubenswrapper[4706]: E1206 05:49:19.730601 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cb3931e194573f40dd7612115f12a8f349de48732cbc39e63a5f27b68bc8f8a0" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 06 05:49:19 crc kubenswrapper[4706]: E1206 05:49:19.732975 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="cb3931e194573f40dd7612115f12a8f349de48732cbc39e63a5f27b68bc8f8a0" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 06 05:49:19 crc kubenswrapper[4706]: E1206 05:49:19.733085 4706 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="718deb27-0684-4cb2-8d09-60b73d7ed6d8" containerName="nova-scheduler-scheduler" Dec 06 05:49:19 crc kubenswrapper[4706]: I1206 05:49:19.937607 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a2849104-5ab6-4332-8bca-dc2f9d0cdec0","Type":"ContainerStarted","Data":"ee8b575b3f8ed004dc17c066257a889bfcebeec34e269ecaa022035cc0448065"} Dec 06 05:49:20 crc kubenswrapper[4706]: I1206 05:49:20.047147 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1cc02a95-a453-4485-a066-9fe2ecfcc3c5" path="/var/lib/kubelet/pods/1cc02a95-a453-4485-a066-9fe2ecfcc3c5/volumes" Dec 06 05:49:20 crc kubenswrapper[4706]: I1206 05:49:20.048022 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="709e6d78-3db4-4779-a1ad-4c7eda89838d" path="/var/lib/kubelet/pods/709e6d78-3db4-4779-a1ad-4c7eda89838d/volumes" Dec 06 05:49:20 crc kubenswrapper[4706]: I1206 05:49:20.947400 4706 generic.go:334] "Generic (PLEG): container finished" podID="718deb27-0684-4cb2-8d09-60b73d7ed6d8" containerID="cb3931e194573f40dd7612115f12a8f349de48732cbc39e63a5f27b68bc8f8a0" exitCode=0 Dec 06 05:49:20 crc kubenswrapper[4706]: I1206 05:49:20.947489 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
event={"ID":"718deb27-0684-4cb2-8d09-60b73d7ed6d8","Type":"ContainerDied","Data":"cb3931e194573f40dd7612115f12a8f349de48732cbc39e63a5f27b68bc8f8a0"} Dec 06 05:49:21 crc kubenswrapper[4706]: I1206 05:49:21.921279 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 06 05:49:21 crc kubenswrapper[4706]: I1206 05:49:21.960973 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"718deb27-0684-4cb2-8d09-60b73d7ed6d8","Type":"ContainerDied","Data":"6e109482e721b18bcdb76e08618b5e77cd09d88fd35a97c168df8bbdca343dc7"} Dec 06 05:49:21 crc kubenswrapper[4706]: I1206 05:49:21.961085 4706 scope.go:117] "RemoveContainer" containerID="cb3931e194573f40dd7612115f12a8f349de48732cbc39e63a5f27b68bc8f8a0" Dec 06 05:49:21 crc kubenswrapper[4706]: I1206 05:49:21.961287 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.047683 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718deb27-0684-4cb2-8d09-60b73d7ed6d8-combined-ca-bundle\") pod \"718deb27-0684-4cb2-8d09-60b73d7ed6d8\" (UID: \"718deb27-0684-4cb2-8d09-60b73d7ed6d8\") " Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.047823 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/718deb27-0684-4cb2-8d09-60b73d7ed6d8-config-data\") pod \"718deb27-0684-4cb2-8d09-60b73d7ed6d8\" (UID: \"718deb27-0684-4cb2-8d09-60b73d7ed6d8\") " Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.047867 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p84cv\" (UniqueName: \"kubernetes.io/projected/718deb27-0684-4cb2-8d09-60b73d7ed6d8-kube-api-access-p84cv\") pod \"718deb27-0684-4cb2-8d09-60b73d7ed6d8\" (UID: \"718deb27-0684-4cb2-8d09-60b73d7ed6d8\") " Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.051871 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/718deb27-0684-4cb2-8d09-60b73d7ed6d8-kube-api-access-p84cv" (OuterVolumeSpecName: "kube-api-access-p84cv") pod "718deb27-0684-4cb2-8d09-60b73d7ed6d8" (UID: "718deb27-0684-4cb2-8d09-60b73d7ed6d8"). InnerVolumeSpecName "kube-api-access-p84cv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.072795 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/718deb27-0684-4cb2-8d09-60b73d7ed6d8-config-data" (OuterVolumeSpecName: "config-data") pod "718deb27-0684-4cb2-8d09-60b73d7ed6d8" (UID: "718deb27-0684-4cb2-8d09-60b73d7ed6d8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.075226 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/718deb27-0684-4cb2-8d09-60b73d7ed6d8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "718deb27-0684-4cb2-8d09-60b73d7ed6d8" (UID: "718deb27-0684-4cb2-8d09-60b73d7ed6d8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.151202 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/718deb27-0684-4cb2-8d09-60b73d7ed6d8-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.151237 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p84cv\" (UniqueName: \"kubernetes.io/projected/718deb27-0684-4cb2-8d09-60b73d7ed6d8-kube-api-access-p84cv\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.151251 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/718deb27-0684-4cb2-8d09-60b73d7ed6d8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.206227 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.300854 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.324371 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.336468 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 06 05:49:22 crc kubenswrapper[4706]: E1206 05:49:22.336885 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="718deb27-0684-4cb2-8d09-60b73d7ed6d8" containerName="nova-scheduler-scheduler" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.336904 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="718deb27-0684-4cb2-8d09-60b73d7ed6d8" containerName="nova-scheduler-scheduler" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.337157 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="718deb27-0684-4cb2-8d09-60b73d7ed6d8" containerName="nova-scheduler-scheduler" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.337938 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.344588 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.346235 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.456683 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d69c3f0-9226-4083-87a6-69e589b0869b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0d69c3f0-9226-4083-87a6-69e589b0869b\") " pod="openstack/nova-scheduler-0" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.457223 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2g2b4\" (UniqueName: \"kubernetes.io/projected/0d69c3f0-9226-4083-87a6-69e589b0869b-kube-api-access-2g2b4\") pod \"nova-scheduler-0\" (UID: \"0d69c3f0-9226-4083-87a6-69e589b0869b\") " pod="openstack/nova-scheduler-0" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.457528 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d69c3f0-9226-4083-87a6-69e589b0869b-config-data\") pod \"nova-scheduler-0\" (UID: \"0d69c3f0-9226-4083-87a6-69e589b0869b\") " pod="openstack/nova-scheduler-0" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.559460 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d69c3f0-9226-4083-87a6-69e589b0869b-config-data\") pod \"nova-scheduler-0\" (UID: \"0d69c3f0-9226-4083-87a6-69e589b0869b\") " pod="openstack/nova-scheduler-0" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.559582 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d69c3f0-9226-4083-87a6-69e589b0869b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0d69c3f0-9226-4083-87a6-69e589b0869b\") " pod="openstack/nova-scheduler-0" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.559640 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2g2b4\" (UniqueName: \"kubernetes.io/projected/0d69c3f0-9226-4083-87a6-69e589b0869b-kube-api-access-2g2b4\") pod \"nova-scheduler-0\" (UID: \"0d69c3f0-9226-4083-87a6-69e589b0869b\") " pod="openstack/nova-scheduler-0" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.564693 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d69c3f0-9226-4083-87a6-69e589b0869b-config-data\") pod \"nova-scheduler-0\" (UID: \"0d69c3f0-9226-4083-87a6-69e589b0869b\") " pod="openstack/nova-scheduler-0" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.567721 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d69c3f0-9226-4083-87a6-69e589b0869b-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0d69c3f0-9226-4083-87a6-69e589b0869b\") " pod="openstack/nova-scheduler-0" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.578181 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2g2b4\" (UniqueName: 
\"kubernetes.io/projected/0d69c3f0-9226-4083-87a6-69e589b0869b-kube-api-access-2g2b4\") pod \"nova-scheduler-0\" (UID: \"0d69c3f0-9226-4083-87a6-69e589b0869b\") " pod="openstack/nova-scheduler-0" Dec 06 05:49:22 crc kubenswrapper[4706]: I1206 05:49:22.662586 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 06 05:49:23 crc kubenswrapper[4706]: I1206 05:49:23.114877 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 06 05:49:23 crc kubenswrapper[4706]: I1206 05:49:23.420236 4706 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 06 05:49:23 crc kubenswrapper[4706]: I1206 05:49:23.993500 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0d69c3f0-9226-4083-87a6-69e589b0869b","Type":"ContainerStarted","Data":"ca11c3fb2b92bb31a0e718f4394cc1fdf4a597aaa755ae11d1deda9d579f1a37"} Dec 06 05:49:23 crc kubenswrapper[4706]: I1206 05:49:23.993575 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0d69c3f0-9226-4083-87a6-69e589b0869b","Type":"ContainerStarted","Data":"9cebe9321928512d67539413a8d06c13497dcf065df8ac83d65c09e3d27d54e9"} Dec 06 05:49:23 crc kubenswrapper[4706]: I1206 05:49:23.996013 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a2849104-5ab6-4332-8bca-dc2f9d0cdec0","Type":"ContainerStarted","Data":"f8f087fdb1291af876147579fc874489954202de709f1f4d7b4b6229d89379a6"} Dec 06 05:49:24 crc kubenswrapper[4706]: I1206 05:49:24.021975 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.021941581 podStartE2EDuration="2.021941581s" podCreationTimestamp="2025-12-06 05:49:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:49:24.015707732 +0000 UTC m=+1786.343531706" watchObservedRunningTime="2025-12-06 05:49:24.021941581 +0000 UTC m=+1786.349765565" Dec 06 05:49:24 crc kubenswrapper[4706]: I1206 05:49:24.046555 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="718deb27-0684-4cb2-8d09-60b73d7ed6d8" path="/var/lib/kubelet/pods/718deb27-0684-4cb2-8d09-60b73d7ed6d8/volumes" Dec 06 05:49:24 crc kubenswrapper[4706]: I1206 05:49:24.877478 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.004149 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7e69fe9-fb4e-412f-87c6-a71da44e6756-combined-ca-bundle\") pod \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\" (UID: \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\") " Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.004251 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gr9n8\" (UniqueName: \"kubernetes.io/projected/a7e69fe9-fb4e-412f-87c6-a71da44e6756-kube-api-access-gr9n8\") pod \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\" (UID: \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\") " Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.004398 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7e69fe9-fb4e-412f-87c6-a71da44e6756-logs\") pod \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\" (UID: \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\") " Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.004448 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7e69fe9-fb4e-412f-87c6-a71da44e6756-config-data\") pod \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\" (UID: \"a7e69fe9-fb4e-412f-87c6-a71da44e6756\") " Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.004858 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7e69fe9-fb4e-412f-87c6-a71da44e6756-logs" (OuterVolumeSpecName: "logs") pod "a7e69fe9-fb4e-412f-87c6-a71da44e6756" (UID: "a7e69fe9-fb4e-412f-87c6-a71da44e6756"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.009197 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7e69fe9-fb4e-412f-87c6-a71da44e6756-kube-api-access-gr9n8" (OuterVolumeSpecName: "kube-api-access-gr9n8") pod "a7e69fe9-fb4e-412f-87c6-a71da44e6756" (UID: "a7e69fe9-fb4e-412f-87c6-a71da44e6756"). InnerVolumeSpecName "kube-api-access-gr9n8". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.019502 4706 generic.go:334] "Generic (PLEG): container finished" podID="a7e69fe9-fb4e-412f-87c6-a71da44e6756" containerID="002268c3c64876de52543f6c3f8fba92547392e5d8db42c410e83a7dd05464ff" exitCode=0 Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.019568 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a7e69fe9-fb4e-412f-87c6-a71da44e6756","Type":"ContainerDied","Data":"002268c3c64876de52543f6c3f8fba92547392e5d8db42c410e83a7dd05464ff"} Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.019598 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a7e69fe9-fb4e-412f-87c6-a71da44e6756","Type":"ContainerDied","Data":"b252ef544bbe3992498504e6f8295f0e5b800024b0159ca1169a919232a1d973"} Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.019614 4706 scope.go:117] "RemoveContainer" containerID="002268c3c64876de52543f6c3f8fba92547392e5d8db42c410e83a7dd05464ff" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.019728 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.031170 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7e69fe9-fb4e-412f-87c6-a71da44e6756-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a7e69fe9-fb4e-412f-87c6-a71da44e6756" (UID: "a7e69fe9-fb4e-412f-87c6-a71da44e6756"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.032432 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a2849104-5ab6-4332-8bca-dc2f9d0cdec0","Type":"ContainerStarted","Data":"eb926dc6d340c479b03f1a245b6da7eb431b317a21eccbc50472856e3b1bb67c"} Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.035608 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7e69fe9-fb4e-412f-87c6-a71da44e6756-config-data" (OuterVolumeSpecName: "config-data") pod "a7e69fe9-fb4e-412f-87c6-a71da44e6756" (UID: "a7e69fe9-fb4e-412f-87c6-a71da44e6756"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.036123 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:49:25 crc kubenswrapper[4706]: E1206 05:49:25.036632 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.106172 4706 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7e69fe9-fb4e-412f-87c6-a71da44e6756-logs\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.106392 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7e69fe9-fb4e-412f-87c6-a71da44e6756-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.106504 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7e69fe9-fb4e-412f-87c6-a71da44e6756-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.106580 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gr9n8\" (UniqueName: \"kubernetes.io/projected/a7e69fe9-fb4e-412f-87c6-a71da44e6756-kube-api-access-gr9n8\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.186075 4706 scope.go:117] "RemoveContainer" containerID="bcd2fdb69a82447d3f4f66cd605572d2638fad9888feafba95ae6ca09f5fd711" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.211854 4706 scope.go:117] "RemoveContainer" containerID="002268c3c64876de52543f6c3f8fba92547392e5d8db42c410e83a7dd05464ff" Dec 06 05:49:25 crc kubenswrapper[4706]: E1206 05:49:25.218122 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"002268c3c64876de52543f6c3f8fba92547392e5d8db42c410e83a7dd05464ff\": container with ID starting with 002268c3c64876de52543f6c3f8fba92547392e5d8db42c410e83a7dd05464ff not found: ID does not exist" containerID="002268c3c64876de52543f6c3f8fba92547392e5d8db42c410e83a7dd05464ff" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.218186 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"002268c3c64876de52543f6c3f8fba92547392e5d8db42c410e83a7dd05464ff"} err="failed to get container status \"002268c3c64876de52543f6c3f8fba92547392e5d8db42c410e83a7dd05464ff\": rpc error: code = NotFound desc = could not find container \"002268c3c64876de52543f6c3f8fba92547392e5d8db42c410e83a7dd05464ff\": container with ID starting with 002268c3c64876de52543f6c3f8fba92547392e5d8db42c410e83a7dd05464ff not found: ID does not exist" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.218224 4706 scope.go:117] "RemoveContainer" containerID="bcd2fdb69a82447d3f4f66cd605572d2638fad9888feafba95ae6ca09f5fd711" Dec 06 05:49:25 crc kubenswrapper[4706]: E1206 05:49:25.221433 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bcd2fdb69a82447d3f4f66cd605572d2638fad9888feafba95ae6ca09f5fd711\": container with ID starting with bcd2fdb69a82447d3f4f66cd605572d2638fad9888feafba95ae6ca09f5fd711 not found: ID does not exist" containerID="bcd2fdb69a82447d3f4f66cd605572d2638fad9888feafba95ae6ca09f5fd711" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.221564 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bcd2fdb69a82447d3f4f66cd605572d2638fad9888feafba95ae6ca09f5fd711"} err="failed to get container status \"bcd2fdb69a82447d3f4f66cd605572d2638fad9888feafba95ae6ca09f5fd711\": rpc error: code = NotFound desc = could not find container \"bcd2fdb69a82447d3f4f66cd605572d2638fad9888feafba95ae6ca09f5fd711\": container with ID starting with bcd2fdb69a82447d3f4f66cd605572d2638fad9888feafba95ae6ca09f5fd711 not found: ID does not exist" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.381889 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.394193 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.403416 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 06 05:49:25 crc kubenswrapper[4706]: E1206 05:49:25.404299 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7e69fe9-fb4e-412f-87c6-a71da44e6756" containerName="nova-api-log" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.404445 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7e69fe9-fb4e-412f-87c6-a71da44e6756" containerName="nova-api-log" Dec 06 05:49:25 crc kubenswrapper[4706]: E1206 05:49:25.404625 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7e69fe9-fb4e-412f-87c6-a71da44e6756" containerName="nova-api-api" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.404730 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7e69fe9-fb4e-412f-87c6-a71da44e6756" containerName="nova-api-api" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.405183 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7e69fe9-fb4e-412f-87c6-a71da44e6756" containerName="nova-api-log" Dec 06 05:49:25 crc 
kubenswrapper[4706]: I1206 05:49:25.405322 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7e69fe9-fb4e-412f-87c6-a71da44e6756" containerName="nova-api-api" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.409291 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.411298 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.423117 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.513783 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-logs\") pod \"nova-api-0\" (UID: \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\") " pod="openstack/nova-api-0" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.513863 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\") " pod="openstack/nova-api-0" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.513914 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qql2\" (UniqueName: \"kubernetes.io/projected/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-kube-api-access-6qql2\") pod \"nova-api-0\" (UID: \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\") " pod="openstack/nova-api-0" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.513945 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-config-data\") pod \"nova-api-0\" (UID: \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\") " pod="openstack/nova-api-0" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.619150 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-logs\") pod \"nova-api-0\" (UID: \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\") " pod="openstack/nova-api-0" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.619236 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\") " pod="openstack/nova-api-0" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.619287 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6qql2\" (UniqueName: \"kubernetes.io/projected/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-kube-api-access-6qql2\") pod \"nova-api-0\" (UID: \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\") " pod="openstack/nova-api-0" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.619580 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-config-data\") pod \"nova-api-0\" (UID: \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\") " pod="openstack/nova-api-0" Dec 06 05:49:25 
crc kubenswrapper[4706]: I1206 05:49:25.619654 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-logs\") pod \"nova-api-0\" (UID: \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\") " pod="openstack/nova-api-0" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.624165 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\") " pod="openstack/nova-api-0" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.626133 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-config-data\") pod \"nova-api-0\" (UID: \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\") " pod="openstack/nova-api-0" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.644701 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qql2\" (UniqueName: \"kubernetes.io/projected/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-kube-api-access-6qql2\") pod \"nova-api-0\" (UID: \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\") " pod="openstack/nova-api-0" Dec 06 05:49:25 crc kubenswrapper[4706]: I1206 05:49:25.734285 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 06 05:49:26 crc kubenswrapper[4706]: I1206 05:49:26.054793 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7e69fe9-fb4e-412f-87c6-a71da44e6756" path="/var/lib/kubelet/pods/a7e69fe9-fb4e-412f-87c6-a71da44e6756/volumes" Dec 06 05:49:26 crc kubenswrapper[4706]: I1206 05:49:26.055808 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a2849104-5ab6-4332-8bca-dc2f9d0cdec0","Type":"ContainerStarted","Data":"2720e0f13f573f71c8f3feb45f9a6c46f2a00594fcf43d1eb5b13a30e0a21e1d"} Dec 06 05:49:26 crc kubenswrapper[4706]: I1206 05:49:26.237501 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 06 05:49:27 crc kubenswrapper[4706]: I1206 05:49:27.058888 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0131a4e1-1fee-4c16-a1a2-6d4f73d66051","Type":"ContainerStarted","Data":"21ef3c88a7c7c8c49077fb83fc9af81de9fd3ea7193bf6fc2d48deb4b03fa483"} Dec 06 05:49:27 crc kubenswrapper[4706]: I1206 05:49:27.059290 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0131a4e1-1fee-4c16-a1a2-6d4f73d66051","Type":"ContainerStarted","Data":"17e2f53a416c9e33b4c1335f2131a80449b693985765c8def2dbe4645b566c84"} Dec 06 05:49:27 crc kubenswrapper[4706]: I1206 05:49:27.059312 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0131a4e1-1fee-4c16-a1a2-6d4f73d66051","Type":"ContainerStarted","Data":"399b6a1c5c1c1e2a77cd046323e2816b6260b935199421f077bfa17533322069"} Dec 06 05:49:27 crc kubenswrapper[4706]: I1206 05:49:27.060436 4706 generic.go:334] "Generic (PLEG): container finished" podID="ff26c6d0-68cb-4541-b647-3a0b244db53c" containerID="6821d12e2756ebfcf7ceff68b64c07924df203a6b5125fd4ed2e902ddec8a81d" exitCode=0 Dec 06 05:49:27 crc kubenswrapper[4706]: I1206 05:49:27.060564 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-rxnl6" 
event={"ID":"ff26c6d0-68cb-4541-b647-3a0b244db53c","Type":"ContainerDied","Data":"6821d12e2756ebfcf7ceff68b64c07924df203a6b5125fd4ed2e902ddec8a81d"} Dec 06 05:49:27 crc kubenswrapper[4706]: I1206 05:49:27.087032 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.08701058 podStartE2EDuration="2.08701058s" podCreationTimestamp="2025-12-06 05:49:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:49:27.076773323 +0000 UTC m=+1789.404597267" watchObservedRunningTime="2025-12-06 05:49:27.08701058 +0000 UTC m=+1789.414834524" Dec 06 05:49:27 crc kubenswrapper[4706]: I1206 05:49:27.670501 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 06 05:49:28 crc kubenswrapper[4706]: I1206 05:49:28.081666 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a2849104-5ab6-4332-8bca-dc2f9d0cdec0","Type":"ContainerStarted","Data":"8ad98e5391a1d5b4cf5f3b6725630d10aefc8dde7ed6248a7dbc6c77660bab78"} Dec 06 05:49:28 crc kubenswrapper[4706]: I1206 05:49:28.106494 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.143612624 podStartE2EDuration="10.10646933s" podCreationTimestamp="2025-12-06 05:49:18 +0000 UTC" firstStartedPulling="2025-12-06 05:49:19.123328701 +0000 UTC m=+1781.451152645" lastFinishedPulling="2025-12-06 05:49:27.086185407 +0000 UTC m=+1789.414009351" observedRunningTime="2025-12-06 05:49:28.099910242 +0000 UTC m=+1790.427734196" watchObservedRunningTime="2025-12-06 05:49:28.10646933 +0000 UTC m=+1790.434293274" Dec 06 05:49:28 crc kubenswrapper[4706]: I1206 05:49:28.488258 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-rxnl6" Dec 06 05:49:28 crc kubenswrapper[4706]: I1206 05:49:28.696721 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5wvsh\" (UniqueName: \"kubernetes.io/projected/ff26c6d0-68cb-4541-b647-3a0b244db53c-kube-api-access-5wvsh\") pod \"ff26c6d0-68cb-4541-b647-3a0b244db53c\" (UID: \"ff26c6d0-68cb-4541-b647-3a0b244db53c\") " Dec 06 05:49:28 crc kubenswrapper[4706]: I1206 05:49:28.696830 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff26c6d0-68cb-4541-b647-3a0b244db53c-combined-ca-bundle\") pod \"ff26c6d0-68cb-4541-b647-3a0b244db53c\" (UID: \"ff26c6d0-68cb-4541-b647-3a0b244db53c\") " Dec 06 05:49:28 crc kubenswrapper[4706]: I1206 05:49:28.696921 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff26c6d0-68cb-4541-b647-3a0b244db53c-scripts\") pod \"ff26c6d0-68cb-4541-b647-3a0b244db53c\" (UID: \"ff26c6d0-68cb-4541-b647-3a0b244db53c\") " Dec 06 05:49:28 crc kubenswrapper[4706]: I1206 05:49:28.696943 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff26c6d0-68cb-4541-b647-3a0b244db53c-config-data\") pod \"ff26c6d0-68cb-4541-b647-3a0b244db53c\" (UID: \"ff26c6d0-68cb-4541-b647-3a0b244db53c\") " Dec 06 05:49:28 crc kubenswrapper[4706]: I1206 05:49:28.710211 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff26c6d0-68cb-4541-b647-3a0b244db53c-scripts" (OuterVolumeSpecName: "scripts") pod "ff26c6d0-68cb-4541-b647-3a0b244db53c" (UID: "ff26c6d0-68cb-4541-b647-3a0b244db53c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:28 crc kubenswrapper[4706]: I1206 05:49:28.725262 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff26c6d0-68cb-4541-b647-3a0b244db53c-kube-api-access-5wvsh" (OuterVolumeSpecName: "kube-api-access-5wvsh") pod "ff26c6d0-68cb-4541-b647-3a0b244db53c" (UID: "ff26c6d0-68cb-4541-b647-3a0b244db53c"). InnerVolumeSpecName "kube-api-access-5wvsh". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:49:28 crc kubenswrapper[4706]: I1206 05:49:28.751279 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff26c6d0-68cb-4541-b647-3a0b244db53c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ff26c6d0-68cb-4541-b647-3a0b244db53c" (UID: "ff26c6d0-68cb-4541-b647-3a0b244db53c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:28 crc kubenswrapper[4706]: I1206 05:49:28.751381 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ff26c6d0-68cb-4541-b647-3a0b244db53c-config-data" (OuterVolumeSpecName: "config-data") pod "ff26c6d0-68cb-4541-b647-3a0b244db53c" (UID: "ff26c6d0-68cb-4541-b647-3a0b244db53c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:28 crc kubenswrapper[4706]: I1206 05:49:28.799342 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5wvsh\" (UniqueName: \"kubernetes.io/projected/ff26c6d0-68cb-4541-b647-3a0b244db53c-kube-api-access-5wvsh\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:28 crc kubenswrapper[4706]: I1206 05:49:28.799383 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff26c6d0-68cb-4541-b647-3a0b244db53c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:28 crc kubenswrapper[4706]: I1206 05:49:28.799396 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ff26c6d0-68cb-4541-b647-3a0b244db53c-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:28 crc kubenswrapper[4706]: I1206 05:49:28.799408 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ff26c6d0-68cb-4541-b647-3a0b244db53c-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.110831 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-rxnl6" Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.111656 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-rxnl6" event={"ID":"ff26c6d0-68cb-4541-b647-3a0b244db53c","Type":"ContainerDied","Data":"ee2c828d2c9746fa5357a2928d8b65728a523620cb4b125fcac0371e33ba9ad7"} Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.111686 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ee2c828d2c9746fa5357a2928d8b65728a523620cb4b125fcac0371e33ba9ad7" Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.111700 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.274546 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 06 05:49:29 crc kubenswrapper[4706]: E1206 05:49:29.274921 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff26c6d0-68cb-4541-b647-3a0b244db53c" containerName="nova-cell1-conductor-db-sync" Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.274938 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff26c6d0-68cb-4541-b647-3a0b244db53c" containerName="nova-cell1-conductor-db-sync" Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.275177 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff26c6d0-68cb-4541-b647-3a0b244db53c" containerName="nova-cell1-conductor-db-sync" Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.276805 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.281242 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.305718 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.409010 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rj5jb\" (UniqueName: \"kubernetes.io/projected/30a8debc-3590-46cb-9042-5cf8fe5a87d6-kube-api-access-rj5jb\") pod \"nova-cell1-conductor-0\" (UID: \"30a8debc-3590-46cb-9042-5cf8fe5a87d6\") " pod="openstack/nova-cell1-conductor-0" Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.409087 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30a8debc-3590-46cb-9042-5cf8fe5a87d6-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"30a8debc-3590-46cb-9042-5cf8fe5a87d6\") " pod="openstack/nova-cell1-conductor-0" Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.409111 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30a8debc-3590-46cb-9042-5cf8fe5a87d6-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"30a8debc-3590-46cb-9042-5cf8fe5a87d6\") " pod="openstack/nova-cell1-conductor-0" Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.511282 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rj5jb\" (UniqueName: \"kubernetes.io/projected/30a8debc-3590-46cb-9042-5cf8fe5a87d6-kube-api-access-rj5jb\") pod \"nova-cell1-conductor-0\" (UID: \"30a8debc-3590-46cb-9042-5cf8fe5a87d6\") " pod="openstack/nova-cell1-conductor-0" Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.511345 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30a8debc-3590-46cb-9042-5cf8fe5a87d6-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"30a8debc-3590-46cb-9042-5cf8fe5a87d6\") " pod="openstack/nova-cell1-conductor-0" Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.511365 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30a8debc-3590-46cb-9042-5cf8fe5a87d6-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"30a8debc-3590-46cb-9042-5cf8fe5a87d6\") " pod="openstack/nova-cell1-conductor-0" Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.516114 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/30a8debc-3590-46cb-9042-5cf8fe5a87d6-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"30a8debc-3590-46cb-9042-5cf8fe5a87d6\") " pod="openstack/nova-cell1-conductor-0" Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.519461 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/30a8debc-3590-46cb-9042-5cf8fe5a87d6-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"30a8debc-3590-46cb-9042-5cf8fe5a87d6\") " pod="openstack/nova-cell1-conductor-0" Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.546579 4706 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rj5jb\" (UniqueName: \"kubernetes.io/projected/30a8debc-3590-46cb-9042-5cf8fe5a87d6-kube-api-access-rj5jb\") pod \"nova-cell1-conductor-0\" (UID: \"30a8debc-3590-46cb-9042-5cf8fe5a87d6\") " pod="openstack/nova-cell1-conductor-0" Dec 06 05:49:29 crc kubenswrapper[4706]: I1206 05:49:29.606433 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Dec 06 05:49:30 crc kubenswrapper[4706]: I1206 05:49:30.048329 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Dec 06 05:49:30 crc kubenswrapper[4706]: I1206 05:49:30.120849 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"30a8debc-3590-46cb-9042-5cf8fe5a87d6","Type":"ContainerStarted","Data":"6d8a1a1b7f67d37349aa96297576c957f19aa2355ba6278b7de8ee2f912645a5"} Dec 06 05:49:31 crc kubenswrapper[4706]: I1206 05:49:31.130018 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"30a8debc-3590-46cb-9042-5cf8fe5a87d6","Type":"ContainerStarted","Data":"9bdee07a47c580c1724454e7e631ce79e97523051d25ba2cffbb22deb887f250"} Dec 06 05:49:31 crc kubenswrapper[4706]: I1206 05:49:31.130533 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Dec 06 05:49:31 crc kubenswrapper[4706]: I1206 05:49:31.165551 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.165528886 podStartE2EDuration="2.165528886s" podCreationTimestamp="2025-12-06 05:49:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:49:31.145734061 +0000 UTC m=+1793.473558015" watchObservedRunningTime="2025-12-06 05:49:31.165528886 +0000 UTC m=+1793.493352830" Dec 06 05:49:32 crc kubenswrapper[4706]: I1206 05:49:32.663722 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Dec 06 05:49:32 crc kubenswrapper[4706]: I1206 05:49:32.698715 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Dec 06 05:49:33 crc kubenswrapper[4706]: I1206 05:49:33.188370 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Dec 06 05:49:35 crc kubenswrapper[4706]: I1206 05:49:35.735546 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 06 05:49:35 crc kubenswrapper[4706]: I1206 05:49:35.737564 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 06 05:49:36 crc kubenswrapper[4706]: I1206 05:49:36.036204 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:49:36 crc kubenswrapper[4706]: E1206 05:49:36.036527 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:49:36 crc 
Dec 06 05:49:36 crc kubenswrapper[4706]: I1206 05:49:36.818282 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="0131a4e1-1fee-4c16-a1a2-6d4f73d66051" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.194:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Dec 06 05:49:36 crc kubenswrapper[4706]: I1206 05:49:36.818347 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="0131a4e1-1fee-4c16-a1a2-6d4f73d66051" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.194:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Dec 06 05:49:39 crc kubenswrapper[4706]: I1206 05:49:39.634432 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Dec 06 05:49:43 crc kubenswrapper[4706]: I1206 05:49:43.241533 4706 generic.go:334] "Generic (PLEG): container finished" podID="5dcecbbd-fabb-49a7-991c-073c0f1734cf" containerID="6eb7b5f5b433b36fbee084b102b39c645d3839994ca9d16aab07b400f4ea2218" exitCode=137
Dec 06 05:49:43 crc kubenswrapper[4706]: I1206 05:49:43.241635 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5dcecbbd-fabb-49a7-991c-073c0f1734cf","Type":"ContainerDied","Data":"6eb7b5f5b433b36fbee084b102b39c645d3839994ca9d16aab07b400f4ea2218"}
Dec 06 05:49:43 crc kubenswrapper[4706]: I1206 05:49:43.245508 4706 generic.go:334] "Generic (PLEG): container finished" podID="7d1ef50a-124a-4647-96fb-42f625df6099" containerID="ed380301caaf69c9aae3c5438ada13bc88eba088ba60bbac53917756b61c7cc4" exitCode=137
Dec 06 05:49:43 crc kubenswrapper[4706]: I1206 05:49:43.245574 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7d1ef50a-124a-4647-96fb-42f625df6099","Type":"ContainerDied","Data":"ed380301caaf69c9aae3c5438ada13bc88eba088ba60bbac53917756b61c7cc4"}
Dec 06 05:49:45 crc kubenswrapper[4706]: I1206 05:49:45.739190 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Dec 06 05:49:45 crc kubenswrapper[4706]: I1206 05:49:45.740728 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Dec 06 05:49:45 crc kubenswrapper[4706]: I1206 05:49:45.743351 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Dec 06 05:49:45 crc kubenswrapper[4706]: I1206 05:49:45.744478 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.276741 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.282724 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.471586 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-59cf4bdb65-f4k9q"]
Need to start a new one" pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.490060 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59cf4bdb65-f4k9q"] Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.635641 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zg8mk\" (UniqueName: \"kubernetes.io/projected/ad459e55-c5fb-42bc-8e86-af5e22355607-kube-api-access-zg8mk\") pod \"dnsmasq-dns-59cf4bdb65-f4k9q\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.635748 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-ovsdbserver-sb\") pod \"dnsmasq-dns-59cf4bdb65-f4k9q\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.635820 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-dns-svc\") pod \"dnsmasq-dns-59cf4bdb65-f4k9q\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.635890 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-config\") pod \"dnsmasq-dns-59cf4bdb65-f4k9q\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.635912 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-ovsdbserver-nb\") pod \"dnsmasq-dns-59cf4bdb65-f4k9q\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.635991 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-dns-swift-storage-0\") pod \"dnsmasq-dns-59cf4bdb65-f4k9q\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.737573 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zg8mk\" (UniqueName: \"kubernetes.io/projected/ad459e55-c5fb-42bc-8e86-af5e22355607-kube-api-access-zg8mk\") pod \"dnsmasq-dns-59cf4bdb65-f4k9q\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.737658 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-ovsdbserver-sb\") pod \"dnsmasq-dns-59cf4bdb65-f4k9q\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.737693 4706 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-dns-svc\") pod \"dnsmasq-dns-59cf4bdb65-f4k9q\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.737743 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-config\") pod \"dnsmasq-dns-59cf4bdb65-f4k9q\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.737759 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-ovsdbserver-nb\") pod \"dnsmasq-dns-59cf4bdb65-f4k9q\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.737818 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-dns-swift-storage-0\") pod \"dnsmasq-dns-59cf4bdb65-f4k9q\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.738957 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-dns-swift-storage-0\") pod \"dnsmasq-dns-59cf4bdb65-f4k9q\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.739695 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-dns-svc\") pod \"dnsmasq-dns-59cf4bdb65-f4k9q\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.739955 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-config\") pod \"dnsmasq-dns-59cf4bdb65-f4k9q\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.739997 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-ovsdbserver-nb\") pod \"dnsmasq-dns-59cf4bdb65-f4k9q\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.743363 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-ovsdbserver-sb\") pod \"dnsmasq-dns-59cf4bdb65-f4k9q\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.762172 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zg8mk\" (UniqueName: 
\"kubernetes.io/projected/ad459e55-c5fb-42bc-8e86-af5e22355607-kube-api-access-zg8mk\") pod \"dnsmasq-dns-59cf4bdb65-f4k9q\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.809531 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.816462 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.851540 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.940546 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2992k\" (UniqueName: \"kubernetes.io/projected/5dcecbbd-fabb-49a7-991c-073c0f1734cf-kube-api-access-2992k\") pod \"5dcecbbd-fabb-49a7-991c-073c0f1734cf\" (UID: \"5dcecbbd-fabb-49a7-991c-073c0f1734cf\") " Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.940623 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d1ef50a-124a-4647-96fb-42f625df6099-combined-ca-bundle\") pod \"7d1ef50a-124a-4647-96fb-42f625df6099\" (UID: \"7d1ef50a-124a-4647-96fb-42f625df6099\") " Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.940734 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xls5p\" (UniqueName: \"kubernetes.io/projected/7d1ef50a-124a-4647-96fb-42f625df6099-kube-api-access-xls5p\") pod \"7d1ef50a-124a-4647-96fb-42f625df6099\" (UID: \"7d1ef50a-124a-4647-96fb-42f625df6099\") " Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.940783 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dcecbbd-fabb-49a7-991c-073c0f1734cf-config-data\") pod \"5dcecbbd-fabb-49a7-991c-073c0f1734cf\" (UID: \"5dcecbbd-fabb-49a7-991c-073c0f1734cf\") " Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.940848 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7d1ef50a-124a-4647-96fb-42f625df6099-logs\") pod \"7d1ef50a-124a-4647-96fb-42f625df6099\" (UID: \"7d1ef50a-124a-4647-96fb-42f625df6099\") " Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.940875 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d1ef50a-124a-4647-96fb-42f625df6099-config-data\") pod \"7d1ef50a-124a-4647-96fb-42f625df6099\" (UID: \"7d1ef50a-124a-4647-96fb-42f625df6099\") " Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.940921 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dcecbbd-fabb-49a7-991c-073c0f1734cf-combined-ca-bundle\") pod \"5dcecbbd-fabb-49a7-991c-073c0f1734cf\" (UID: \"5dcecbbd-fabb-49a7-991c-073c0f1734cf\") " Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.943996 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d1ef50a-124a-4647-96fb-42f625df6099-kube-api-access-xls5p" (OuterVolumeSpecName: "kube-api-access-xls5p") pod 
"7d1ef50a-124a-4647-96fb-42f625df6099" (UID: "7d1ef50a-124a-4647-96fb-42f625df6099"). InnerVolumeSpecName "kube-api-access-xls5p". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.945068 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d1ef50a-124a-4647-96fb-42f625df6099-logs" (OuterVolumeSpecName: "logs") pod "7d1ef50a-124a-4647-96fb-42f625df6099" (UID: "7d1ef50a-124a-4647-96fb-42f625df6099"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.946511 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5dcecbbd-fabb-49a7-991c-073c0f1734cf-kube-api-access-2992k" (OuterVolumeSpecName: "kube-api-access-2992k") pod "5dcecbbd-fabb-49a7-991c-073c0f1734cf" (UID: "5dcecbbd-fabb-49a7-991c-073c0f1734cf"). InnerVolumeSpecName "kube-api-access-2992k". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.976212 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5dcecbbd-fabb-49a7-991c-073c0f1734cf-config-data" (OuterVolumeSpecName: "config-data") pod "5dcecbbd-fabb-49a7-991c-073c0f1734cf" (UID: "5dcecbbd-fabb-49a7-991c-073c0f1734cf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.981011 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5dcecbbd-fabb-49a7-991c-073c0f1734cf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5dcecbbd-fabb-49a7-991c-073c0f1734cf" (UID: "5dcecbbd-fabb-49a7-991c-073c0f1734cf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:46 crc kubenswrapper[4706]: I1206 05:49:46.985228 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d1ef50a-124a-4647-96fb-42f625df6099-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7d1ef50a-124a-4647-96fb-42f625df6099" (UID: "7d1ef50a-124a-4647-96fb-42f625df6099"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.013807 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7d1ef50a-124a-4647-96fb-42f625df6099-config-data" (OuterVolumeSpecName: "config-data") pod "7d1ef50a-124a-4647-96fb-42f625df6099" (UID: "7d1ef50a-124a-4647-96fb-42f625df6099"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.042727 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xls5p\" (UniqueName: \"kubernetes.io/projected/7d1ef50a-124a-4647-96fb-42f625df6099-kube-api-access-xls5p\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.042748 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dcecbbd-fabb-49a7-991c-073c0f1734cf-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.042758 4706 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7d1ef50a-124a-4647-96fb-42f625df6099-logs\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.042766 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7d1ef50a-124a-4647-96fb-42f625df6099-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.042775 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dcecbbd-fabb-49a7-991c-073c0f1734cf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.042784 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2992k\" (UniqueName: \"kubernetes.io/projected/5dcecbbd-fabb-49a7-991c-073c0f1734cf-kube-api-access-2992k\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.042791 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7d1ef50a-124a-4647-96fb-42f625df6099-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.286401 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"5dcecbbd-fabb-49a7-991c-073c0f1734cf","Type":"ContainerDied","Data":"e46903b4dfeeecb719919c5e776a1b63cd6cd9cafa5bdf0614fe2ccdad771add"} Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.286438 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.286468 4706 scope.go:117] "RemoveContainer" containerID="6eb7b5f5b433b36fbee084b102b39c645d3839994ca9d16aab07b400f4ea2218" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.289878 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.301153 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7d1ef50a-124a-4647-96fb-42f625df6099","Type":"ContainerDied","Data":"2414aa87701ec3c697a52cfd4a62b07e45a65243f2237eca0e177eac9c7f295a"} Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.319279 4706 scope.go:117] "RemoveContainer" containerID="ed380301caaf69c9aae3c5438ada13bc88eba088ba60bbac53917756b61c7cc4" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.343759 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.360185 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59cf4bdb65-f4k9q"] Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.368909 4706 scope.go:117] "RemoveContainer" containerID="fd3603e4e0a64d881904389c9b2ad86c9c266878c57a1d6ec6aae91d7633a746" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.379381 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.397864 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.409142 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.417123 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 06 05:49:47 crc kubenswrapper[4706]: E1206 05:49:47.417819 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d1ef50a-124a-4647-96fb-42f625df6099" containerName="nova-metadata-metadata" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.417845 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d1ef50a-124a-4647-96fb-42f625df6099" containerName="nova-metadata-metadata" Dec 06 05:49:47 crc kubenswrapper[4706]: E1206 05:49:47.417888 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5dcecbbd-fabb-49a7-991c-073c0f1734cf" containerName="nova-cell1-novncproxy-novncproxy" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.417900 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dcecbbd-fabb-49a7-991c-073c0f1734cf" containerName="nova-cell1-novncproxy-novncproxy" Dec 06 05:49:47 crc kubenswrapper[4706]: E1206 05:49:47.417928 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d1ef50a-124a-4647-96fb-42f625df6099" containerName="nova-metadata-log" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.417959 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d1ef50a-124a-4647-96fb-42f625df6099" containerName="nova-metadata-log" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.419019 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d1ef50a-124a-4647-96fb-42f625df6099" containerName="nova-metadata-metadata" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.419080 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="5dcecbbd-fabb-49a7-991c-073c0f1734cf" containerName="nova-cell1-novncproxy-novncproxy" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.419129 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d1ef50a-124a-4647-96fb-42f625df6099" containerName="nova-metadata-log" Dec 06 05:49:47 crc 
Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.421349 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.425920 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.427175 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.431328 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.443520 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.445207 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.448572 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data"
Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.448831 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc"
Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.451471 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt"
Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.454273 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"]
Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.552726 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/675005da-4197-468b-b62a-3182ca49693a-config-data\") pod \"nova-metadata-0\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " pod="openstack/nova-metadata-0"
Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.553081 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4142f86-6823-4e49-9a0e-564cdf8d043b-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4142f86-6823-4e49-9a0e-564cdf8d043b\") " pod="openstack/nova-cell1-novncproxy-0"
Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.553131 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/675005da-4197-468b-b62a-3182ca49693a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " pod="openstack/nova-metadata-0"
Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.553203 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/675005da-4197-468b-b62a-3182ca49693a-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " pod="openstack/nova-metadata-0"
(UID: \"b4142f86-6823-4e49-9a0e-564cdf8d043b\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.553263 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4142f86-6823-4e49-9a0e-564cdf8d043b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4142f86-6823-4e49-9a0e-564cdf8d043b\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.553285 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/675005da-4197-468b-b62a-3182ca49693a-logs\") pod \"nova-metadata-0\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " pod="openstack/nova-metadata-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.553302 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxsmk\" (UniqueName: \"kubernetes.io/projected/b4142f86-6823-4e49-9a0e-564cdf8d043b-kube-api-access-kxsmk\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4142f86-6823-4e49-9a0e-564cdf8d043b\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.553321 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4142f86-6823-4e49-9a0e-564cdf8d043b-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4142f86-6823-4e49-9a0e-564cdf8d043b\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.553346 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zp9jw\" (UniqueName: \"kubernetes.io/projected/675005da-4197-468b-b62a-3182ca49693a-kube-api-access-zp9jw\") pod \"nova-metadata-0\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " pod="openstack/nova-metadata-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.654559 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4142f86-6823-4e49-9a0e-564cdf8d043b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4142f86-6823-4e49-9a0e-564cdf8d043b\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.654616 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4142f86-6823-4e49-9a0e-564cdf8d043b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4142f86-6823-4e49-9a0e-564cdf8d043b\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.654639 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/675005da-4197-468b-b62a-3182ca49693a-logs\") pod \"nova-metadata-0\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " pod="openstack/nova-metadata-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.654655 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxsmk\" (UniqueName: \"kubernetes.io/projected/b4142f86-6823-4e49-9a0e-564cdf8d043b-kube-api-access-kxsmk\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4142f86-6823-4e49-9a0e-564cdf8d043b\") " 
pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.654676 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4142f86-6823-4e49-9a0e-564cdf8d043b-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4142f86-6823-4e49-9a0e-564cdf8d043b\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.654703 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zp9jw\" (UniqueName: \"kubernetes.io/projected/675005da-4197-468b-b62a-3182ca49693a-kube-api-access-zp9jw\") pod \"nova-metadata-0\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " pod="openstack/nova-metadata-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.654738 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/675005da-4197-468b-b62a-3182ca49693a-config-data\") pod \"nova-metadata-0\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " pod="openstack/nova-metadata-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.654762 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4142f86-6823-4e49-9a0e-564cdf8d043b-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4142f86-6823-4e49-9a0e-564cdf8d043b\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.654799 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/675005da-4197-468b-b62a-3182ca49693a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " pod="openstack/nova-metadata-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.654863 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/675005da-4197-468b-b62a-3182ca49693a-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " pod="openstack/nova-metadata-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.655411 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/675005da-4197-468b-b62a-3182ca49693a-logs\") pod \"nova-metadata-0\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " pod="openstack/nova-metadata-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.661499 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4142f86-6823-4e49-9a0e-564cdf8d043b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4142f86-6823-4e49-9a0e-564cdf8d043b\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.662683 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/675005da-4197-468b-b62a-3182ca49693a-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " pod="openstack/nova-metadata-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.663183 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/b4142f86-6823-4e49-9a0e-564cdf8d043b-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4142f86-6823-4e49-9a0e-564cdf8d043b\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.663941 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/b4142f86-6823-4e49-9a0e-564cdf8d043b-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4142f86-6823-4e49-9a0e-564cdf8d043b\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.664597 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4142f86-6823-4e49-9a0e-564cdf8d043b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4142f86-6823-4e49-9a0e-564cdf8d043b\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.665341 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/675005da-4197-468b-b62a-3182ca49693a-config-data\") pod \"nova-metadata-0\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " pod="openstack/nova-metadata-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.666485 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/675005da-4197-468b-b62a-3182ca49693a-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " pod="openstack/nova-metadata-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.677723 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxsmk\" (UniqueName: \"kubernetes.io/projected/b4142f86-6823-4e49-9a0e-564cdf8d043b-kube-api-access-kxsmk\") pod \"nova-cell1-novncproxy-0\" (UID: \"b4142f86-6823-4e49-9a0e-564cdf8d043b\") " pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.682331 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zp9jw\" (UniqueName: \"kubernetes.io/projected/675005da-4197-468b-b62a-3182ca49693a-kube-api-access-zp9jw\") pod \"nova-metadata-0\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " pod="openstack/nova-metadata-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.740940 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 06 05:49:47 crc kubenswrapper[4706]: I1206 05:49:47.767137 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:48 crc kubenswrapper[4706]: I1206 05:49:48.051537 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:49:48 crc kubenswrapper[4706]: E1206 05:49:48.051824 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:49:48 crc kubenswrapper[4706]: I1206 05:49:48.066854 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5dcecbbd-fabb-49a7-991c-073c0f1734cf" path="/var/lib/kubelet/pods/5dcecbbd-fabb-49a7-991c-073c0f1734cf/volumes" Dec 06 05:49:48 crc kubenswrapper[4706]: I1206 05:49:48.067859 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d1ef50a-124a-4647-96fb-42f625df6099" path="/var/lib/kubelet/pods/7d1ef50a-124a-4647-96fb-42f625df6099/volumes" Dec 06 05:49:48 crc kubenswrapper[4706]: I1206 05:49:48.204513 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 06 05:49:48 crc kubenswrapper[4706]: W1206 05:49:48.218509 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod675005da_4197_468b_b62a_3182ca49693a.slice/crio-3aa2bcdbd1133cf9ffd4d1066bfa6561f71989e01e2d6445a06abe6cbd241bf6 WatchSource:0}: Error finding container 3aa2bcdbd1133cf9ffd4d1066bfa6561f71989e01e2d6445a06abe6cbd241bf6: Status 404 returned error can't find the container with id 3aa2bcdbd1133cf9ffd4d1066bfa6561f71989e01e2d6445a06abe6cbd241bf6 Dec 06 05:49:48 crc kubenswrapper[4706]: I1206 05:49:48.302800 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"675005da-4197-468b-b62a-3182ca49693a","Type":"ContainerStarted","Data":"3aa2bcdbd1133cf9ffd4d1066bfa6561f71989e01e2d6445a06abe6cbd241bf6"} Dec 06 05:49:48 crc kubenswrapper[4706]: I1206 05:49:48.307398 4706 generic.go:334] "Generic (PLEG): container finished" podID="ad459e55-c5fb-42bc-8e86-af5e22355607" containerID="0401c42c4a0f951f015e24de34691af48b08abe72636f9e57c5b99d401658074" exitCode=0 Dec 06 05:49:48 crc kubenswrapper[4706]: I1206 05:49:48.307541 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" event={"ID":"ad459e55-c5fb-42bc-8e86-af5e22355607","Type":"ContainerDied","Data":"0401c42c4a0f951f015e24de34691af48b08abe72636f9e57c5b99d401658074"} Dec 06 05:49:48 crc kubenswrapper[4706]: I1206 05:49:48.307573 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" event={"ID":"ad459e55-c5fb-42bc-8e86-af5e22355607","Type":"ContainerStarted","Data":"7bafdb5b59de68bf81052392d0087f9a8d2b37da9b3b7cd0d1a3a54ee4854ea7"} Dec 06 05:49:48 crc kubenswrapper[4706]: I1206 05:49:48.490531 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Dec 06 05:49:48 crc kubenswrapper[4706]: W1206 05:49:48.498942 4706 manager.go:1169] Failed to process watch event {EventType:0 
Dec 06 05:49:48 crc kubenswrapper[4706]: W1206 05:49:48.498942 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4142f86_6823_4e49_9a0e_564cdf8d043b.slice/crio-b5bdc7be49b57a148fdf494710b7c8b0f861d8ecdc18e97fcf291668823da8c6 WatchSource:0}: Error finding container b5bdc7be49b57a148fdf494710b7c8b0f861d8ecdc18e97fcf291668823da8c6: Status 404 returned error can't find the container with id b5bdc7be49b57a148fdf494710b7c8b0f861d8ecdc18e97fcf291668823da8c6
Dec 06 05:49:48 crc kubenswrapper[4706]: I1206 05:49:48.683894 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Dec 06 05:49:49 crc kubenswrapper[4706]: I1206 05:49:49.023950 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Dec 06 05:49:49 crc kubenswrapper[4706]: I1206 05:49:49.149421 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Dec 06 05:49:49 crc kubenswrapper[4706]: I1206 05:49:49.317243 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" event={"ID":"ad459e55-c5fb-42bc-8e86-af5e22355607","Type":"ContainerStarted","Data":"1f3a99267e795dff0557aaf03d838400653c1ee98d08e931b61dea17094f4259"}
Dec 06 05:49:49 crc kubenswrapper[4706]: I1206 05:49:49.317328 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q"
Dec 06 05:49:49 crc kubenswrapper[4706]: I1206 05:49:49.320346 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"675005da-4197-468b-b62a-3182ca49693a","Type":"ContainerStarted","Data":"700d0ea159f1b3485e859b6cfb02dca77aadcc2e902e5595898899900c600f1b"}
Dec 06 05:49:49 crc kubenswrapper[4706]: I1206 05:49:49.320397 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"675005da-4197-468b-b62a-3182ca49693a","Type":"ContainerStarted","Data":"5546afc44c4fd14cb62dc229d0144a43131b1e6f7da62bac1478b47d7239a8ad"}
Dec 06 05:49:49 crc kubenswrapper[4706]: I1206 05:49:49.323207 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b4142f86-6823-4e49-9a0e-564cdf8d043b","Type":"ContainerStarted","Data":"2d04383747d44ceb7817f32e874a65c2eb18d7011e435a327294422f18200e20"}
Dec 06 05:49:49 crc kubenswrapper[4706]: I1206 05:49:49.323258 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"b4142f86-6823-4e49-9a0e-564cdf8d043b","Type":"ContainerStarted","Data":"b5bdc7be49b57a148fdf494710b7c8b0f861d8ecdc18e97fcf291668823da8c6"}
Dec 06 05:49:49 crc kubenswrapper[4706]: I1206 05:49:49.323347 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="0131a4e1-1fee-4c16-a1a2-6d4f73d66051" containerName="nova-api-log" containerID="cri-o://17e2f53a416c9e33b4c1335f2131a80449b693985765c8def2dbe4645b566c84" gracePeriod=30
Dec 06 05:49:49 crc kubenswrapper[4706]: I1206 05:49:49.323426 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="0131a4e1-1fee-4c16-a1a2-6d4f73d66051" containerName="nova-api-api" containerID="cri-o://21ef3c88a7c7c8c49077fb83fc9af81de9fd3ea7193bf6fc2d48deb4b03fa483" gracePeriod=30
containerID="cri-o://f8f087fdb1291af876147579fc874489954202de709f1f4d7b4b6229d89379a6" gracePeriod=30 Dec 06 05:49:49 crc kubenswrapper[4706]: I1206 05:49:49.323624 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" containerName="ceilometer-notification-agent" containerID="cri-o://eb926dc6d340c479b03f1a245b6da7eb431b317a21eccbc50472856e3b1bb67c" gracePeriod=30 Dec 06 05:49:49 crc kubenswrapper[4706]: I1206 05:49:49.323588 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" containerName="proxy-httpd" containerID="cri-o://8ad98e5391a1d5b4cf5f3b6725630d10aefc8dde7ed6248a7dbc6c77660bab78" gracePeriod=30 Dec 06 05:49:49 crc kubenswrapper[4706]: I1206 05:49:49.323607 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" containerName="sg-core" containerID="cri-o://2720e0f13f573f71c8f3feb45f9a6c46f2a00594fcf43d1eb5b13a30e0a21e1d" gracePeriod=30 Dec 06 05:49:49 crc kubenswrapper[4706]: I1206 05:49:49.346346 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" podStartSLOduration=3.346329237 podStartE2EDuration="3.346329237s" podCreationTimestamp="2025-12-06 05:49:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:49:49.341514016 +0000 UTC m=+1811.669337960" watchObservedRunningTime="2025-12-06 05:49:49.346329237 +0000 UTC m=+1811.674153181" Dec 06 05:49:49 crc kubenswrapper[4706]: I1206 05:49:49.375361 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.37534084 podStartE2EDuration="2.37534084s" podCreationTimestamp="2025-12-06 05:49:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:49:49.367386255 +0000 UTC m=+1811.695210199" watchObservedRunningTime="2025-12-06 05:49:49.37534084 +0000 UTC m=+1811.703164794" Dec 06 05:49:49 crc kubenswrapper[4706]: I1206 05:49:49.395286 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.395264358 podStartE2EDuration="2.395264358s" podCreationTimestamp="2025-12-06 05:49:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:49:49.386337167 +0000 UTC m=+1811.714161101" watchObservedRunningTime="2025-12-06 05:49:49.395264358 +0000 UTC m=+1811.723088302" Dec 06 05:49:49 crc kubenswrapper[4706]: E1206 05:49:49.695838 4706 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda2849104_5ab6_4332_8bca_dc2f9d0cdec0.slice/crio-conmon-8ad98e5391a1d5b4cf5f3b6725630d10aefc8dde7ed6248a7dbc6c77660bab78.scope\": RecentStats: unable to find data in memory cache]" Dec 06 05:49:50 crc kubenswrapper[4706]: I1206 05:49:50.338267 4706 generic.go:334] "Generic (PLEG): container finished" podID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" containerID="8ad98e5391a1d5b4cf5f3b6725630d10aefc8dde7ed6248a7dbc6c77660bab78" exitCode=0 Dec 06 05:49:50 crc kubenswrapper[4706]: 
I1206 05:49:50.338594 4706 generic.go:334] "Generic (PLEG): container finished" podID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" containerID="2720e0f13f573f71c8f3feb45f9a6c46f2a00594fcf43d1eb5b13a30e0a21e1d" exitCode=2 Dec 06 05:49:50 crc kubenswrapper[4706]: I1206 05:49:50.338379 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a2849104-5ab6-4332-8bca-dc2f9d0cdec0","Type":"ContainerDied","Data":"8ad98e5391a1d5b4cf5f3b6725630d10aefc8dde7ed6248a7dbc6c77660bab78"} Dec 06 05:49:50 crc kubenswrapper[4706]: I1206 05:49:50.338652 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a2849104-5ab6-4332-8bca-dc2f9d0cdec0","Type":"ContainerDied","Data":"2720e0f13f573f71c8f3feb45f9a6c46f2a00594fcf43d1eb5b13a30e0a21e1d"} Dec 06 05:49:50 crc kubenswrapper[4706]: I1206 05:49:50.338676 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a2849104-5ab6-4332-8bca-dc2f9d0cdec0","Type":"ContainerDied","Data":"f8f087fdb1291af876147579fc874489954202de709f1f4d7b4b6229d89379a6"} Dec 06 05:49:50 crc kubenswrapper[4706]: I1206 05:49:50.338605 4706 generic.go:334] "Generic (PLEG): container finished" podID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" containerID="f8f087fdb1291af876147579fc874489954202de709f1f4d7b4b6229d89379a6" exitCode=0 Dec 06 05:49:50 crc kubenswrapper[4706]: I1206 05:49:50.341947 4706 generic.go:334] "Generic (PLEG): container finished" podID="0131a4e1-1fee-4c16-a1a2-6d4f73d66051" containerID="17e2f53a416c9e33b4c1335f2131a80449b693985765c8def2dbe4645b566c84" exitCode=143 Dec 06 05:49:50 crc kubenswrapper[4706]: I1206 05:49:50.342135 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0131a4e1-1fee-4c16-a1a2-6d4f73d66051","Type":"ContainerDied","Data":"17e2f53a416c9e33b4c1335f2131a80449b693985765c8def2dbe4645b566c84"} Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.259519 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.335808 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-sg-core-conf-yaml\") pod \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.335857 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-run-httpd\") pod \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.336039 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-combined-ca-bundle\") pod \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.336088 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-scripts\") pod \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.336163 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-log-httpd\") pod \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.336196 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-config-data\") pod \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.336262 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ksk9n\" (UniqueName: \"kubernetes.io/projected/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-kube-api-access-ksk9n\") pod \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.336333 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-ceilometer-tls-certs\") pod \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\" (UID: \"a2849104-5ab6-4332-8bca-dc2f9d0cdec0\") " Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.336394 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a2849104-5ab6-4332-8bca-dc2f9d0cdec0" (UID: "a2849104-5ab6-4332-8bca-dc2f9d0cdec0"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.336703 4706 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-run-httpd\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.337449 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a2849104-5ab6-4332-8bca-dc2f9d0cdec0" (UID: "a2849104-5ab6-4332-8bca-dc2f9d0cdec0"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.345144 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-scripts" (OuterVolumeSpecName: "scripts") pod "a2849104-5ab6-4332-8bca-dc2f9d0cdec0" (UID: "a2849104-5ab6-4332-8bca-dc2f9d0cdec0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.345155 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-kube-api-access-ksk9n" (OuterVolumeSpecName: "kube-api-access-ksk9n") pod "a2849104-5ab6-4332-8bca-dc2f9d0cdec0" (UID: "a2849104-5ab6-4332-8bca-dc2f9d0cdec0"). InnerVolumeSpecName "kube-api-access-ksk9n". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.360324 4706 generic.go:334] "Generic (PLEG): container finished" podID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" containerID="eb926dc6d340c479b03f1a245b6da7eb431b317a21eccbc50472856e3b1bb67c" exitCode=0 Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.360374 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a2849104-5ab6-4332-8bca-dc2f9d0cdec0","Type":"ContainerDied","Data":"eb926dc6d340c479b03f1a245b6da7eb431b317a21eccbc50472856e3b1bb67c"} Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.360402 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a2849104-5ab6-4332-8bca-dc2f9d0cdec0","Type":"ContainerDied","Data":"ee8b575b3f8ed004dc17c066257a889bfcebeec34e269ecaa022035cc0448065"} Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.360419 4706 scope.go:117] "RemoveContainer" containerID="8ad98e5391a1d5b4cf5f3b6725630d10aefc8dde7ed6248a7dbc6c77660bab78" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.360550 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.377001 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a2849104-5ab6-4332-8bca-dc2f9d0cdec0" (UID: "a2849104-5ab6-4332-8bca-dc2f9d0cdec0"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.392326 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "a2849104-5ab6-4332-8bca-dc2f9d0cdec0" (UID: "a2849104-5ab6-4332-8bca-dc2f9d0cdec0"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.416577 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a2849104-5ab6-4332-8bca-dc2f9d0cdec0" (UID: "a2849104-5ab6-4332-8bca-dc2f9d0cdec0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.438332 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.438362 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.438373 4706 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-log-httpd\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.438383 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ksk9n\" (UniqueName: \"kubernetes.io/projected/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-kube-api-access-ksk9n\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.438396 4706 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.438404 4706 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.462895 4706 scope.go:117] "RemoveContainer" containerID="2720e0f13f573f71c8f3feb45f9a6c46f2a00594fcf43d1eb5b13a30e0a21e1d" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.463145 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-config-data" (OuterVolumeSpecName: "config-data") pod "a2849104-5ab6-4332-8bca-dc2f9d0cdec0" (UID: "a2849104-5ab6-4332-8bca-dc2f9d0cdec0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.484755 4706 scope.go:117] "RemoveContainer" containerID="eb926dc6d340c479b03f1a245b6da7eb431b317a21eccbc50472856e3b1bb67c" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.503849 4706 scope.go:117] "RemoveContainer" containerID="f8f087fdb1291af876147579fc874489954202de709f1f4d7b4b6229d89379a6" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.522867 4706 scope.go:117] "RemoveContainer" containerID="8ad98e5391a1d5b4cf5f3b6725630d10aefc8dde7ed6248a7dbc6c77660bab78" Dec 06 05:49:51 crc kubenswrapper[4706]: E1206 05:49:51.523301 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8ad98e5391a1d5b4cf5f3b6725630d10aefc8dde7ed6248a7dbc6c77660bab78\": container with ID starting with 8ad98e5391a1d5b4cf5f3b6725630d10aefc8dde7ed6248a7dbc6c77660bab78 not found: ID does not exist" containerID="8ad98e5391a1d5b4cf5f3b6725630d10aefc8dde7ed6248a7dbc6c77660bab78" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.523333 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8ad98e5391a1d5b4cf5f3b6725630d10aefc8dde7ed6248a7dbc6c77660bab78"} err="failed to get container status \"8ad98e5391a1d5b4cf5f3b6725630d10aefc8dde7ed6248a7dbc6c77660bab78\": rpc error: code = NotFound desc = could not find container \"8ad98e5391a1d5b4cf5f3b6725630d10aefc8dde7ed6248a7dbc6c77660bab78\": container with ID starting with 8ad98e5391a1d5b4cf5f3b6725630d10aefc8dde7ed6248a7dbc6c77660bab78 not found: ID does not exist" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.523353 4706 scope.go:117] "RemoveContainer" containerID="2720e0f13f573f71c8f3feb45f9a6c46f2a00594fcf43d1eb5b13a30e0a21e1d" Dec 06 05:49:51 crc kubenswrapper[4706]: E1206 05:49:51.523739 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2720e0f13f573f71c8f3feb45f9a6c46f2a00594fcf43d1eb5b13a30e0a21e1d\": container with ID starting with 2720e0f13f573f71c8f3feb45f9a6c46f2a00594fcf43d1eb5b13a30e0a21e1d not found: ID does not exist" containerID="2720e0f13f573f71c8f3feb45f9a6c46f2a00594fcf43d1eb5b13a30e0a21e1d" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.523779 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2720e0f13f573f71c8f3feb45f9a6c46f2a00594fcf43d1eb5b13a30e0a21e1d"} err="failed to get container status \"2720e0f13f573f71c8f3feb45f9a6c46f2a00594fcf43d1eb5b13a30e0a21e1d\": rpc error: code = NotFound desc = could not find container \"2720e0f13f573f71c8f3feb45f9a6c46f2a00594fcf43d1eb5b13a30e0a21e1d\": container with ID starting with 2720e0f13f573f71c8f3feb45f9a6c46f2a00594fcf43d1eb5b13a30e0a21e1d not found: ID does not exist" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.523812 4706 scope.go:117] "RemoveContainer" containerID="eb926dc6d340c479b03f1a245b6da7eb431b317a21eccbc50472856e3b1bb67c" Dec 06 05:49:51 crc kubenswrapper[4706]: E1206 05:49:51.524343 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb926dc6d340c479b03f1a245b6da7eb431b317a21eccbc50472856e3b1bb67c\": container with ID starting with eb926dc6d340c479b03f1a245b6da7eb431b317a21eccbc50472856e3b1bb67c not found: ID does not exist" containerID="eb926dc6d340c479b03f1a245b6da7eb431b317a21eccbc50472856e3b1bb67c" Dec 06 05:49:51 crc 
kubenswrapper[4706]: I1206 05:49:51.524366 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb926dc6d340c479b03f1a245b6da7eb431b317a21eccbc50472856e3b1bb67c"} err="failed to get container status \"eb926dc6d340c479b03f1a245b6da7eb431b317a21eccbc50472856e3b1bb67c\": rpc error: code = NotFound desc = could not find container \"eb926dc6d340c479b03f1a245b6da7eb431b317a21eccbc50472856e3b1bb67c\": container with ID starting with eb926dc6d340c479b03f1a245b6da7eb431b317a21eccbc50472856e3b1bb67c not found: ID does not exist" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.524382 4706 scope.go:117] "RemoveContainer" containerID="f8f087fdb1291af876147579fc874489954202de709f1f4d7b4b6229d89379a6" Dec 06 05:49:51 crc kubenswrapper[4706]: E1206 05:49:51.524585 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f8f087fdb1291af876147579fc874489954202de709f1f4d7b4b6229d89379a6\": container with ID starting with f8f087fdb1291af876147579fc874489954202de709f1f4d7b4b6229d89379a6 not found: ID does not exist" containerID="f8f087fdb1291af876147579fc874489954202de709f1f4d7b4b6229d89379a6" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.524607 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8f087fdb1291af876147579fc874489954202de709f1f4d7b4b6229d89379a6"} err="failed to get container status \"f8f087fdb1291af876147579fc874489954202de709f1f4d7b4b6229d89379a6\": rpc error: code = NotFound desc = could not find container \"f8f087fdb1291af876147579fc874489954202de709f1f4d7b4b6229d89379a6\": container with ID starting with f8f087fdb1291af876147579fc874489954202de709f1f4d7b4b6229d89379a6 not found: ID does not exist" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.539912 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a2849104-5ab6-4332-8bca-dc2f9d0cdec0-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.692285 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.702285 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.715103 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:49:51 crc kubenswrapper[4706]: E1206 05:49:51.715800 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" containerName="ceilometer-notification-agent" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.715823 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" containerName="ceilometer-notification-agent" Dec 06 05:49:51 crc kubenswrapper[4706]: E1206 05:49:51.715835 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" containerName="sg-core" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.715841 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" containerName="sg-core" Dec 06 05:49:51 crc kubenswrapper[4706]: E1206 05:49:51.715862 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" containerName="proxy-httpd" Dec 06 05:49:51 crc 
kubenswrapper[4706]: I1206 05:49:51.715868 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" containerName="proxy-httpd" Dec 06 05:49:51 crc kubenswrapper[4706]: E1206 05:49:51.715883 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" containerName="ceilometer-central-agent" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.715889 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" containerName="ceilometer-central-agent" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.716135 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" containerName="sg-core" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.716164 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" containerName="ceilometer-notification-agent" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.716182 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" containerName="ceilometer-central-agent" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.716198 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" containerName="proxy-httpd" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.718463 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.729084 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.729147 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.729189 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.739807 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.846938 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vl8ft\" (UniqueName: \"kubernetes.io/projected/b3b0627f-70db-4eb0-8d16-c93648772685-kube-api-access-vl8ft\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.847320 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b3b0627f-70db-4eb0-8d16-c93648772685-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.847389 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3b0627f-70db-4eb0-8d16-c93648772685-log-httpd\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.847445 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/b3b0627f-70db-4eb0-8d16-c93648772685-run-httpd\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.847606 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3b0627f-70db-4eb0-8d16-c93648772685-config-data\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.847663 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3b0627f-70db-4eb0-8d16-c93648772685-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.847684 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3b0627f-70db-4eb0-8d16-c93648772685-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.847714 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3b0627f-70db-4eb0-8d16-c93648772685-scripts\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.949275 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3b0627f-70db-4eb0-8d16-c93648772685-log-httpd\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.949343 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3b0627f-70db-4eb0-8d16-c93648772685-run-httpd\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.949423 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3b0627f-70db-4eb0-8d16-c93648772685-config-data\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.949458 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3b0627f-70db-4eb0-8d16-c93648772685-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.949477 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3b0627f-70db-4eb0-8d16-c93648772685-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.949499 4706 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3b0627f-70db-4eb0-8d16-c93648772685-scripts\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.949525 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vl8ft\" (UniqueName: \"kubernetes.io/projected/b3b0627f-70db-4eb0-8d16-c93648772685-kube-api-access-vl8ft\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.949542 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b3b0627f-70db-4eb0-8d16-c93648772685-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.949844 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3b0627f-70db-4eb0-8d16-c93648772685-log-httpd\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.950025 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b3b0627f-70db-4eb0-8d16-c93648772685-run-httpd\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.953396 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3b0627f-70db-4eb0-8d16-c93648772685-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.954143 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b3b0627f-70db-4eb0-8d16-c93648772685-scripts\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.954191 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b3b0627f-70db-4eb0-8d16-c93648772685-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.957975 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3b0627f-70db-4eb0-8d16-c93648772685-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.958962 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3b0627f-70db-4eb0-8d16-c93648772685-config-data\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:51 crc kubenswrapper[4706]: I1206 05:49:51.965451 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vl8ft\" 
(UniqueName: \"kubernetes.io/projected/b3b0627f-70db-4eb0-8d16-c93648772685-kube-api-access-vl8ft\") pod \"ceilometer-0\" (UID: \"b3b0627f-70db-4eb0-8d16-c93648772685\") " pod="openstack/ceilometer-0" Dec 06 05:49:52 crc kubenswrapper[4706]: I1206 05:49:52.048015 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2849104-5ab6-4332-8bca-dc2f9d0cdec0" path="/var/lib/kubelet/pods/a2849104-5ab6-4332-8bca-dc2f9d0cdec0/volumes" Dec 06 05:49:52 crc kubenswrapper[4706]: I1206 05:49:52.053743 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Dec 06 05:49:52 crc kubenswrapper[4706]: I1206 05:49:52.465089 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Dec 06 05:49:52 crc kubenswrapper[4706]: W1206 05:49:52.466713 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb3b0627f_70db_4eb0_8d16_c93648772685.slice/crio-d749222885706a1d2631f7e3facec78466e42e6d84ef6a273b9dfc498a71b6ec WatchSource:0}: Error finding container d749222885706a1d2631f7e3facec78466e42e6d84ef6a273b9dfc498a71b6ec: Status 404 returned error can't find the container with id d749222885706a1d2631f7e3facec78466e42e6d84ef6a273b9dfc498a71b6ec Dec 06 05:49:52 crc kubenswrapper[4706]: I1206 05:49:52.741197 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 06 05:49:52 crc kubenswrapper[4706]: I1206 05:49:52.741257 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 06 05:49:52 crc kubenswrapper[4706]: I1206 05:49:52.768723 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:53 crc kubenswrapper[4706]: I1206 05:49:53.402528 4706 generic.go:334] "Generic (PLEG): container finished" podID="0131a4e1-1fee-4c16-a1a2-6d4f73d66051" containerID="21ef3c88a7c7c8c49077fb83fc9af81de9fd3ea7193bf6fc2d48deb4b03fa483" exitCode=0 Dec 06 05:49:53 crc kubenswrapper[4706]: I1206 05:49:53.403134 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0131a4e1-1fee-4c16-a1a2-6d4f73d66051","Type":"ContainerDied","Data":"21ef3c88a7c7c8c49077fb83fc9af81de9fd3ea7193bf6fc2d48deb4b03fa483"} Dec 06 05:49:53 crc kubenswrapper[4706]: I1206 05:49:53.406212 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b3b0627f-70db-4eb0-8d16-c93648772685","Type":"ContainerStarted","Data":"25d95127e116c7dc9bd7e7e046e596e6c9b5bc398f32b9d5abd572b6f5cabed3"} Dec 06 05:49:53 crc kubenswrapper[4706]: I1206 05:49:53.406301 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b3b0627f-70db-4eb0-8d16-c93648772685","Type":"ContainerStarted","Data":"d749222885706a1d2631f7e3facec78466e42e6d84ef6a273b9dfc498a71b6ec"} Dec 06 05:49:53 crc kubenswrapper[4706]: I1206 05:49:53.525411 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 06 05:49:53 crc kubenswrapper[4706]: I1206 05:49:53.600210 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6qql2\" (UniqueName: \"kubernetes.io/projected/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-kube-api-access-6qql2\") pod \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\" (UID: \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\") " Dec 06 05:49:53 crc kubenswrapper[4706]: I1206 05:49:53.600409 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-config-data\") pod \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\" (UID: \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\") " Dec 06 05:49:53 crc kubenswrapper[4706]: I1206 05:49:53.600533 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-logs\") pod \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\" (UID: \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\") " Dec 06 05:49:53 crc kubenswrapper[4706]: I1206 05:49:53.600578 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-combined-ca-bundle\") pod \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\" (UID: \"0131a4e1-1fee-4c16-a1a2-6d4f73d66051\") " Dec 06 05:49:53 crc kubenswrapper[4706]: I1206 05:49:53.601361 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-logs" (OuterVolumeSpecName: "logs") pod "0131a4e1-1fee-4c16-a1a2-6d4f73d66051" (UID: "0131a4e1-1fee-4c16-a1a2-6d4f73d66051"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:49:53 crc kubenswrapper[4706]: I1206 05:49:53.610242 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-kube-api-access-6qql2" (OuterVolumeSpecName: "kube-api-access-6qql2") pod "0131a4e1-1fee-4c16-a1a2-6d4f73d66051" (UID: "0131a4e1-1fee-4c16-a1a2-6d4f73d66051"). InnerVolumeSpecName "kube-api-access-6qql2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:49:53 crc kubenswrapper[4706]: I1206 05:49:53.636664 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-config-data" (OuterVolumeSpecName: "config-data") pod "0131a4e1-1fee-4c16-a1a2-6d4f73d66051" (UID: "0131a4e1-1fee-4c16-a1a2-6d4f73d66051"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:53 crc kubenswrapper[4706]: I1206 05:49:53.649706 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0131a4e1-1fee-4c16-a1a2-6d4f73d66051" (UID: "0131a4e1-1fee-4c16-a1a2-6d4f73d66051"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:49:53 crc kubenswrapper[4706]: I1206 05:49:53.703016 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:53 crc kubenswrapper[4706]: I1206 05:49:53.703061 4706 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-logs\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:53 crc kubenswrapper[4706]: I1206 05:49:53.703073 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:53 crc kubenswrapper[4706]: I1206 05:49:53.703086 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6qql2\" (UniqueName: \"kubernetes.io/projected/0131a4e1-1fee-4c16-a1a2-6d4f73d66051-kube-api-access-6qql2\") on node \"crc\" DevicePath \"\"" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.416522 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"0131a4e1-1fee-4c16-a1a2-6d4f73d66051","Type":"ContainerDied","Data":"399b6a1c5c1c1e2a77cd046323e2816b6260b935199421f077bfa17533322069"} Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.416867 4706 scope.go:117] "RemoveContainer" containerID="21ef3c88a7c7c8c49077fb83fc9af81de9fd3ea7193bf6fc2d48deb4b03fa483" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.416560 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.438184 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.440793 4706 scope.go:117] "RemoveContainer" containerID="17e2f53a416c9e33b4c1335f2131a80449b693985765c8def2dbe4645b566c84" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.446585 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.469618 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 06 05:49:54 crc kubenswrapper[4706]: E1206 05:49:54.470150 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0131a4e1-1fee-4c16-a1a2-6d4f73d66051" containerName="nova-api-log" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.470169 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="0131a4e1-1fee-4c16-a1a2-6d4f73d66051" containerName="nova-api-log" Dec 06 05:49:54 crc kubenswrapper[4706]: E1206 05:49:54.470217 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0131a4e1-1fee-4c16-a1a2-6d4f73d66051" containerName="nova-api-api" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.470225 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="0131a4e1-1fee-4c16-a1a2-6d4f73d66051" containerName="nova-api-api" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.470391 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="0131a4e1-1fee-4c16-a1a2-6d4f73d66051" containerName="nova-api-log" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.470412 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="0131a4e1-1fee-4c16-a1a2-6d4f73d66051" 
containerName="nova-api-api" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.471546 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.481138 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.481183 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.481205 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.496273 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.622740 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-internal-tls-certs\") pod \"nova-api-0\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") " pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.622794 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-public-tls-certs\") pod \"nova-api-0\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") " pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.622822 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-config-data\") pod \"nova-api-0\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") " pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.622852 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") " pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.622872 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7n7fm\" (UniqueName: \"kubernetes.io/projected/766ae73e-0923-4626-bb2d-03a302967827-kube-api-access-7n7fm\") pod \"nova-api-0\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") " pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.622939 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/766ae73e-0923-4626-bb2d-03a302967827-logs\") pod \"nova-api-0\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") " pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.724840 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/766ae73e-0923-4626-bb2d-03a302967827-logs\") pod \"nova-api-0\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") " pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.724966 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-internal-tls-certs\") pod \"nova-api-0\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") " pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.724992 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-public-tls-certs\") pod \"nova-api-0\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") " pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.725018 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-config-data\") pod \"nova-api-0\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") " pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.725065 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") " pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.725092 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7n7fm\" (UniqueName: \"kubernetes.io/projected/766ae73e-0923-4626-bb2d-03a302967827-kube-api-access-7n7fm\") pod \"nova-api-0\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") " pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.725874 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/766ae73e-0923-4626-bb2d-03a302967827-logs\") pod \"nova-api-0\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") " pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.731552 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-public-tls-certs\") pod \"nova-api-0\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") " pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.731734 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") " pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.732012 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-config-data\") pod \"nova-api-0\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") " pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.743280 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-internal-tls-certs\") pod \"nova-api-0\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") " pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.748155 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7n7fm\" (UniqueName: 
\"kubernetes.io/projected/766ae73e-0923-4626-bb2d-03a302967827-kube-api-access-7n7fm\") pod \"nova-api-0\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") " pod="openstack/nova-api-0" Dec 06 05:49:54 crc kubenswrapper[4706]: I1206 05:49:54.792415 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 06 05:49:55 crc kubenswrapper[4706]: W1206 05:49:55.241345 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod766ae73e_0923_4626_bb2d_03a302967827.slice/crio-3135ad725dd427b97ac7955e3de3e43f9f2c66d3113dade5f34b3d2b8c9c028d WatchSource:0}: Error finding container 3135ad725dd427b97ac7955e3de3e43f9f2c66d3113dade5f34b3d2b8c9c028d: Status 404 returned error can't find the container with id 3135ad725dd427b97ac7955e3de3e43f9f2c66d3113dade5f34b3d2b8c9c028d Dec 06 05:49:55 crc kubenswrapper[4706]: I1206 05:49:55.252794 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 06 05:49:55 crc kubenswrapper[4706]: I1206 05:49:55.448619 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"766ae73e-0923-4626-bb2d-03a302967827","Type":"ContainerStarted","Data":"3135ad725dd427b97ac7955e3de3e43f9f2c66d3113dade5f34b3d2b8c9c028d"} Dec 06 05:49:55 crc kubenswrapper[4706]: I1206 05:49:55.452065 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b3b0627f-70db-4eb0-8d16-c93648772685","Type":"ContainerStarted","Data":"9b4114e1fad60f21e1c8d9c9c4ba8458aa6b26f32b68467527c7a65e25f04eec"} Dec 06 05:49:56 crc kubenswrapper[4706]: I1206 05:49:56.050797 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0131a4e1-1fee-4c16-a1a2-6d4f73d66051" path="/var/lib/kubelet/pods/0131a4e1-1fee-4c16-a1a2-6d4f73d66051/volumes" Dec 06 05:49:56 crc kubenswrapper[4706]: I1206 05:49:56.464196 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b3b0627f-70db-4eb0-8d16-c93648772685","Type":"ContainerStarted","Data":"1dd1af95a2bc616f5be526da023f6cbee70b099ab494048cafb6b102ca8eb680"} Dec 06 05:49:56 crc kubenswrapper[4706]: I1206 05:49:56.465956 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"766ae73e-0923-4626-bb2d-03a302967827","Type":"ContainerStarted","Data":"6b32958c8181d8904c04d0094cf45e09dace4b0fbd403f55be01a9666ec80973"} Dec 06 05:49:56 crc kubenswrapper[4706]: I1206 05:49:56.466008 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"766ae73e-0923-4626-bb2d-03a302967827","Type":"ContainerStarted","Data":"a39a16d81462778b976ac7ac5cbc5854397837dc526d5dfdfd916ad68a60163b"} Dec 06 05:49:56 crc kubenswrapper[4706]: I1206 05:49:56.492769 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.492745678 podStartE2EDuration="2.492745678s" podCreationTimestamp="2025-12-06 05:49:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:49:56.484719351 +0000 UTC m=+1818.812543305" watchObservedRunningTime="2025-12-06 05:49:56.492745678 +0000 UTC m=+1818.820569622" Dec 06 05:49:56 crc kubenswrapper[4706]: I1206 05:49:56.853274 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:49:56 crc kubenswrapper[4706]: 
I1206 05:49:56.913430 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-845d6d6f59-6hcmg"]
Dec 06 05:49:56 crc kubenswrapper[4706]: I1206 05:49:56.913674 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" podUID="83c5c36d-bae3-4ca2-a542-7223116168e1" containerName="dnsmasq-dns" containerID="cri-o://d5e3999ecbcbc76576ace37635d7636fbed71940758ab9eddf95651b344f28f1" gracePeriod=10
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.477863 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg"
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.478097 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b3b0627f-70db-4eb0-8d16-c93648772685","Type":"ContainerStarted","Data":"832fddca0f372e8dac53aa06a524b63388f03da98b2db510d1abdba2b4f6a87c"}
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.478574 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.481263 4706 generic.go:334] "Generic (PLEG): container finished" podID="83c5c36d-bae3-4ca2-a542-7223116168e1" containerID="d5e3999ecbcbc76576ace37635d7636fbed71940758ab9eddf95651b344f28f1" exitCode=0
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.481351 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" event={"ID":"83c5c36d-bae3-4ca2-a542-7223116168e1","Type":"ContainerDied","Data":"d5e3999ecbcbc76576ace37635d7636fbed71940758ab9eddf95651b344f28f1"}
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.481390 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" event={"ID":"83c5c36d-bae3-4ca2-a542-7223116168e1","Type":"ContainerDied","Data":"60fc26eaf45c856f8c91a3934a616fc127cadcc07c428b8e7d38fabe6e54fd01"}
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.481458 4706 scope.go:117] "RemoveContainer" containerID="d5e3999ecbcbc76576ace37635d7636fbed71940758ab9eddf95651b344f28f1"
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.505618 4706 scope.go:117] "RemoveContainer" containerID="a86dffa37d88c2ffe5dd4f730bf95faeecc0edb03fc68c3f2508c432234bbbcf"
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.516803 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.060322245 podStartE2EDuration="6.516783411s" podCreationTimestamp="2025-12-06 05:49:51 +0000 UTC" firstStartedPulling="2025-12-06 05:49:52.469131884 +0000 UTC m=+1814.796955828" lastFinishedPulling="2025-12-06 05:49:56.92559305 +0000 UTC m=+1819.253416994" observedRunningTime="2025-12-06 05:49:57.503415979 +0000 UTC m=+1819.831239923" watchObservedRunningTime="2025-12-06 05:49:57.516783411 +0000 UTC m=+1819.844607345"
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.546535 4706 scope.go:117] "RemoveContainer" containerID="d5e3999ecbcbc76576ace37635d7636fbed71940758ab9eddf95651b344f28f1"
Dec 06 05:49:57 crc kubenswrapper[4706]: E1206 05:49:57.548081 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d5e3999ecbcbc76576ace37635d7636fbed71940758ab9eddf95651b344f28f1\": container with ID starting with d5e3999ecbcbc76576ace37635d7636fbed71940758ab9eddf95651b344f28f1 not found: ID does not exist" containerID="d5e3999ecbcbc76576ace37635d7636fbed71940758ab9eddf95651b344f28f1"
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.548135 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5e3999ecbcbc76576ace37635d7636fbed71940758ab9eddf95651b344f28f1"} err="failed to get container status \"d5e3999ecbcbc76576ace37635d7636fbed71940758ab9eddf95651b344f28f1\": rpc error: code = NotFound desc = could not find container \"d5e3999ecbcbc76576ace37635d7636fbed71940758ab9eddf95651b344f28f1\": container with ID starting with d5e3999ecbcbc76576ace37635d7636fbed71940758ab9eddf95651b344f28f1 not found: ID does not exist"
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.548159 4706 scope.go:117] "RemoveContainer" containerID="a86dffa37d88c2ffe5dd4f730bf95faeecc0edb03fc68c3f2508c432234bbbcf"
Dec 06 05:49:57 crc kubenswrapper[4706]: E1206 05:49:57.548477 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a86dffa37d88c2ffe5dd4f730bf95faeecc0edb03fc68c3f2508c432234bbbcf\": container with ID starting with a86dffa37d88c2ffe5dd4f730bf95faeecc0edb03fc68c3f2508c432234bbbcf not found: ID does not exist" containerID="a86dffa37d88c2ffe5dd4f730bf95faeecc0edb03fc68c3f2508c432234bbbcf"
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.548506 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a86dffa37d88c2ffe5dd4f730bf95faeecc0edb03fc68c3f2508c432234bbbcf"} err="failed to get container status \"a86dffa37d88c2ffe5dd4f730bf95faeecc0edb03fc68c3f2508c432234bbbcf\": rpc error: code = NotFound desc = could not find container \"a86dffa37d88c2ffe5dd4f730bf95faeecc0edb03fc68c3f2508c432234bbbcf\": container with ID starting with a86dffa37d88c2ffe5dd4f730bf95faeecc0edb03fc68c3f2508c432234bbbcf not found: ID does not exist"
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.587333 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-dns-swift-storage-0\") pod \"83c5c36d-bae3-4ca2-a542-7223116168e1\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") "
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.587408 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-ovsdbserver-nb\") pod \"83c5c36d-bae3-4ca2-a542-7223116168e1\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") "
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.588513 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-config\") pod \"83c5c36d-bae3-4ca2-a542-7223116168e1\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") "
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.588601 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-ovsdbserver-sb\") pod \"83c5c36d-bae3-4ca2-a542-7223116168e1\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") "
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.588644 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hk95q\" (UniqueName: \"kubernetes.io/projected/83c5c36d-bae3-4ca2-a542-7223116168e1-kube-api-access-hk95q\") pod \"83c5c36d-bae3-4ca2-a542-7223116168e1\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") "
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.588714 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-dns-svc\") pod \"83c5c36d-bae3-4ca2-a542-7223116168e1\" (UID: \"83c5c36d-bae3-4ca2-a542-7223116168e1\") "
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.615936 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83c5c36d-bae3-4ca2-a542-7223116168e1-kube-api-access-hk95q" (OuterVolumeSpecName: "kube-api-access-hk95q") pod "83c5c36d-bae3-4ca2-a542-7223116168e1" (UID: "83c5c36d-bae3-4ca2-a542-7223116168e1"). InnerVolumeSpecName "kube-api-access-hk95q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.700376 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hk95q\" (UniqueName: \"kubernetes.io/projected/83c5c36d-bae3-4ca2-a542-7223116168e1-kube-api-access-hk95q\") on node \"crc\" DevicePath \"\""
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.738991 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-config" (OuterVolumeSpecName: "config") pod "83c5c36d-bae3-4ca2-a542-7223116168e1" (UID: "83c5c36d-bae3-4ca2-a542-7223116168e1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.741027 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.741166 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.743459 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "83c5c36d-bae3-4ca2-a542-7223116168e1" (UID: "83c5c36d-bae3-4ca2-a542-7223116168e1"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.751579 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "83c5c36d-bae3-4ca2-a542-7223116168e1" (UID: "83c5c36d-bae3-4ca2-a542-7223116168e1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.754560 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "83c5c36d-bae3-4ca2-a542-7223116168e1" (UID: "83c5c36d-bae3-4ca2-a542-7223116168e1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.763675 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "83c5c36d-bae3-4ca2-a542-7223116168e1" (UID: "83c5c36d-bae3-4ca2-a542-7223116168e1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.768580 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0"
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.802183 4706 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.802219 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.802233 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-config\") on node \"crc\" DevicePath \"\""
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.802244 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.802258 4706 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83c5c36d-bae3-4ca2-a542-7223116168e1-dns-svc\") on node \"crc\" DevicePath \"\""
Dec 06 05:49:57 crc kubenswrapper[4706]: I1206 05:49:57.804139 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0"
Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.490609 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg"
Need to start a new one" pod="openstack/dnsmasq-dns-845d6d6f59-6hcmg" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.517173 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-845d6d6f59-6hcmg"] Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.527681 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.529265 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-845d6d6f59-6hcmg"] Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.698166 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-j8txv"] Dec 06 05:49:58 crc kubenswrapper[4706]: E1206 05:49:58.698681 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83c5c36d-bae3-4ca2-a542-7223116168e1" containerName="dnsmasq-dns" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.698704 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="83c5c36d-bae3-4ca2-a542-7223116168e1" containerName="dnsmasq-dns" Dec 06 05:49:58 crc kubenswrapper[4706]: E1206 05:49:58.698751 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83c5c36d-bae3-4ca2-a542-7223116168e1" containerName="init" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.698761 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="83c5c36d-bae3-4ca2-a542-7223116168e1" containerName="init" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.699023 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="83c5c36d-bae3-4ca2-a542-7223116168e1" containerName="dnsmasq-dns" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.700410 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-j8txv" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.702442 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.706581 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-j8txv"] Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.710505 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.755264 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="675005da-4197-468b-b62a-3182ca49693a" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.755298 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="675005da-4197-468b-b62a-3182ca49693a" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.822164 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2qvj7\" (UniqueName: \"kubernetes.io/projected/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-kube-api-access-2qvj7\") pod \"nova-cell1-cell-mapping-j8txv\" (UID: \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\") " pod="openstack/nova-cell1-cell-mapping-j8txv" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.822247 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-config-data\") pod \"nova-cell1-cell-mapping-j8txv\" (UID: \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\") " pod="openstack/nova-cell1-cell-mapping-j8txv" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.822294 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-scripts\") pod \"nova-cell1-cell-mapping-j8txv\" (UID: \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\") " pod="openstack/nova-cell1-cell-mapping-j8txv" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.822427 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-j8txv\" (UID: \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\") " pod="openstack/nova-cell1-cell-mapping-j8txv" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.924273 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2qvj7\" (UniqueName: \"kubernetes.io/projected/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-kube-api-access-2qvj7\") pod \"nova-cell1-cell-mapping-j8txv\" (UID: \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\") " pod="openstack/nova-cell1-cell-mapping-j8txv" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.924344 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-config-data\") pod \"nova-cell1-cell-mapping-j8txv\" (UID: \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\") " pod="openstack/nova-cell1-cell-mapping-j8txv" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.924381 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-scripts\") pod \"nova-cell1-cell-mapping-j8txv\" (UID: \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\") " pod="openstack/nova-cell1-cell-mapping-j8txv" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.924448 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-j8txv\" (UID: \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\") " pod="openstack/nova-cell1-cell-mapping-j8txv" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.932115 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-scripts\") pod \"nova-cell1-cell-mapping-j8txv\" (UID: \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\") " pod="openstack/nova-cell1-cell-mapping-j8txv" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.932562 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-j8txv\" (UID: \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\") " pod="openstack/nova-cell1-cell-mapping-j8txv" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.934021 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-config-data\") pod \"nova-cell1-cell-mapping-j8txv\" (UID: \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\") " pod="openstack/nova-cell1-cell-mapping-j8txv" Dec 06 05:49:58 crc kubenswrapper[4706]: I1206 05:49:58.945496 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2qvj7\" (UniqueName: \"kubernetes.io/projected/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-kube-api-access-2qvj7\") pod \"nova-cell1-cell-mapping-j8txv\" (UID: \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\") " pod="openstack/nova-cell1-cell-mapping-j8txv" Dec 06 05:49:59 crc kubenswrapper[4706]: I1206 05:49:59.028449 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-j8txv" Dec 06 05:49:59 crc kubenswrapper[4706]: I1206 05:49:59.562669 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-j8txv"] Dec 06 05:49:59 crc kubenswrapper[4706]: W1206 05:49:59.564923 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb7cced43_4fd9_4594_afd8_9c3e1ce7bb69.slice/crio-ff1e248c9f3a0ae099273296c30cde4f144f7078c418e0e1eb501cf7b8a11da2 WatchSource:0}: Error finding container ff1e248c9f3a0ae099273296c30cde4f144f7078c418e0e1eb501cf7b8a11da2: Status 404 returned error can't find the container with id ff1e248c9f3a0ae099273296c30cde4f144f7078c418e0e1eb501cf7b8a11da2 Dec 06 05:50:00 crc kubenswrapper[4706]: I1206 05:50:00.044815 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:50:00 crc kubenswrapper[4706]: E1206 05:50:00.045081 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:50:00 crc kubenswrapper[4706]: I1206 05:50:00.046575 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83c5c36d-bae3-4ca2-a542-7223116168e1" path="/var/lib/kubelet/pods/83c5c36d-bae3-4ca2-a542-7223116168e1/volumes" Dec 06 05:50:00 crc kubenswrapper[4706]: I1206 05:50:00.512415 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-j8txv" event={"ID":"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69","Type":"ContainerStarted","Data":"b28219acfb02a2c4e23ef378f9824558b3ae988d58297dac54f350c94efd63db"} Dec 06 05:50:00 crc kubenswrapper[4706]: I1206 05:50:00.512466 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-j8txv" event={"ID":"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69","Type":"ContainerStarted","Data":"ff1e248c9f3a0ae099273296c30cde4f144f7078c418e0e1eb501cf7b8a11da2"} Dec 06 05:50:01 crc kubenswrapper[4706]: I1206 05:50:01.550675 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-j8txv" podStartSLOduration=3.55064525 podStartE2EDuration="3.55064525s" podCreationTimestamp="2025-12-06 05:49:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:50:01.537838724 +0000 UTC m=+1823.865662688" watchObservedRunningTime="2025-12-06 05:50:01.55064525 +0000 UTC m=+1823.878469204" Dec 06 05:50:04 crc kubenswrapper[4706]: I1206 05:50:04.556679 4706 generic.go:334] "Generic (PLEG): container finished" podID="b7cced43-4fd9-4594-afd8-9c3e1ce7bb69" containerID="b28219acfb02a2c4e23ef378f9824558b3ae988d58297dac54f350c94efd63db" exitCode=0 Dec 06 05:50:04 crc kubenswrapper[4706]: I1206 05:50:04.556822 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-j8txv" event={"ID":"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69","Type":"ContainerDied","Data":"b28219acfb02a2c4e23ef378f9824558b3ae988d58297dac54f350c94efd63db"} Dec 06 05:50:04 crc kubenswrapper[4706]: I1206 05:50:04.793598 4706 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 06 05:50:04 crc kubenswrapper[4706]: I1206 05:50:04.793670 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 06 05:50:05 crc kubenswrapper[4706]: I1206 05:50:05.819187 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="766ae73e-0923-4626-bb2d-03a302967827" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.200:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 06 05:50:05 crc kubenswrapper[4706]: I1206 05:50:05.819756 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="766ae73e-0923-4626-bb2d-03a302967827" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.200:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.034349 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-j8txv" Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.208305 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2qvj7\" (UniqueName: \"kubernetes.io/projected/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-kube-api-access-2qvj7\") pod \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\" (UID: \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\") " Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.208425 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-combined-ca-bundle\") pod \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\" (UID: \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\") " Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.208527 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-config-data\") pod \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\" (UID: \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\") " Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.208590 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-scripts\") pod \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\" (UID: \"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69\") " Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.222504 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-kube-api-access-2qvj7" (OuterVolumeSpecName: "kube-api-access-2qvj7") pod "b7cced43-4fd9-4594-afd8-9c3e1ce7bb69" (UID: "b7cced43-4fd9-4594-afd8-9c3e1ce7bb69"). InnerVolumeSpecName "kube-api-access-2qvj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.223642 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-scripts" (OuterVolumeSpecName: "scripts") pod "b7cced43-4fd9-4594-afd8-9c3e1ce7bb69" (UID: "b7cced43-4fd9-4594-afd8-9c3e1ce7bb69"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.248184 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-config-data" (OuterVolumeSpecName: "config-data") pod "b7cced43-4fd9-4594-afd8-9c3e1ce7bb69" (UID: "b7cced43-4fd9-4594-afd8-9c3e1ce7bb69"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.261499 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b7cced43-4fd9-4594-afd8-9c3e1ce7bb69" (UID: "b7cced43-4fd9-4594-afd8-9c3e1ce7bb69"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.311017 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.311062 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.311073 4706 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-scripts\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.311083 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2qvj7\" (UniqueName: \"kubernetes.io/projected/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69-kube-api-access-2qvj7\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.575474 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-j8txv" event={"ID":"b7cced43-4fd9-4594-afd8-9c3e1ce7bb69","Type":"ContainerDied","Data":"ff1e248c9f3a0ae099273296c30cde4f144f7078c418e0e1eb501cf7b8a11da2"} Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.575515 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ff1e248c9f3a0ae099273296c30cde4f144f7078c418e0e1eb501cf7b8a11da2" Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.575528 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-j8txv" Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.756784 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.757114 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="766ae73e-0923-4626-bb2d-03a302967827" containerName="nova-api-log" containerID="cri-o://a39a16d81462778b976ac7ac5cbc5854397837dc526d5dfdfd916ad68a60163b" gracePeriod=30 Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.757222 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="766ae73e-0923-4626-bb2d-03a302967827" containerName="nova-api-api" containerID="cri-o://6b32958c8181d8904c04d0094cf45e09dace4b0fbd403f55be01a9666ec80973" gracePeriod=30 Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.770526 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.770731 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="0d69c3f0-9226-4083-87a6-69e589b0869b" containerName="nova-scheduler-scheduler" containerID="cri-o://ca11c3fb2b92bb31a0e718f4394cc1fdf4a597aaa755ae11d1deda9d579f1a37" gracePeriod=30 Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.830074 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.830389 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="675005da-4197-468b-b62a-3182ca49693a" containerName="nova-metadata-log" containerID="cri-o://5546afc44c4fd14cb62dc229d0144a43131b1e6f7da62bac1478b47d7239a8ad" gracePeriod=30 Dec 06 05:50:06 crc kubenswrapper[4706]: I1206 05:50:06.830449 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="675005da-4197-468b-b62a-3182ca49693a" containerName="nova-metadata-metadata" containerID="cri-o://700d0ea159f1b3485e859b6cfb02dca77aadcc2e902e5595898899900c600f1b" gracePeriod=30 Dec 06 05:50:07 crc kubenswrapper[4706]: E1206 05:50:07.665399 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ca11c3fb2b92bb31a0e718f4394cc1fdf4a597aaa755ae11d1deda9d579f1a37" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 06 05:50:07 crc kubenswrapper[4706]: E1206 05:50:07.667008 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ca11c3fb2b92bb31a0e718f4394cc1fdf4a597aaa755ae11d1deda9d579f1a37" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 06 05:50:07 crc kubenswrapper[4706]: E1206 05:50:07.668521 4706 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ca11c3fb2b92bb31a0e718f4394cc1fdf4a597aaa755ae11d1deda9d579f1a37" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Dec 06 05:50:07 crc kubenswrapper[4706]: E1206 05:50:07.668563 4706 prober.go:104] "Probe errored" 
err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="0d69c3f0-9226-4083-87a6-69e589b0869b" containerName="nova-scheduler-scheduler" Dec 06 05:50:08 crc kubenswrapper[4706]: I1206 05:50:08.592397 4706 generic.go:334] "Generic (PLEG): container finished" podID="766ae73e-0923-4626-bb2d-03a302967827" containerID="a39a16d81462778b976ac7ac5cbc5854397837dc526d5dfdfd916ad68a60163b" exitCode=143 Dec 06 05:50:08 crc kubenswrapper[4706]: I1206 05:50:08.592473 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"766ae73e-0923-4626-bb2d-03a302967827","Type":"ContainerDied","Data":"a39a16d81462778b976ac7ac5cbc5854397837dc526d5dfdfd916ad68a60163b"} Dec 06 05:50:08 crc kubenswrapper[4706]: I1206 05:50:08.595057 4706 generic.go:334] "Generic (PLEG): container finished" podID="675005da-4197-468b-b62a-3182ca49693a" containerID="5546afc44c4fd14cb62dc229d0144a43131b1e6f7da62bac1478b47d7239a8ad" exitCode=143 Dec 06 05:50:08 crc kubenswrapper[4706]: I1206 05:50:08.595086 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"675005da-4197-468b-b62a-3182ca49693a","Type":"ContainerDied","Data":"5546afc44c4fd14cb62dc229d0144a43131b1e6f7da62bac1478b47d7239a8ad"} Dec 06 05:50:10 crc kubenswrapper[4706]: I1206 05:50:10.612328 4706 generic.go:334] "Generic (PLEG): container finished" podID="675005da-4197-468b-b62a-3182ca49693a" containerID="700d0ea159f1b3485e859b6cfb02dca77aadcc2e902e5595898899900c600f1b" exitCode=0 Dec 06 05:50:10 crc kubenswrapper[4706]: I1206 05:50:10.612651 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"675005da-4197-468b-b62a-3182ca49693a","Type":"ContainerDied","Data":"700d0ea159f1b3485e859b6cfb02dca77aadcc2e902e5595898899900c600f1b"} Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.248541 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.343593 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/675005da-4197-468b-b62a-3182ca49693a-config-data\") pod \"675005da-4197-468b-b62a-3182ca49693a\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.343731 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/675005da-4197-468b-b62a-3182ca49693a-logs\") pod \"675005da-4197-468b-b62a-3182ca49693a\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.343806 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zp9jw\" (UniqueName: \"kubernetes.io/projected/675005da-4197-468b-b62a-3182ca49693a-kube-api-access-zp9jw\") pod \"675005da-4197-468b-b62a-3182ca49693a\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.344266 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/675005da-4197-468b-b62a-3182ca49693a-logs" (OuterVolumeSpecName: "logs") pod "675005da-4197-468b-b62a-3182ca49693a" (UID: "675005da-4197-468b-b62a-3182ca49693a"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.344567 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/675005da-4197-468b-b62a-3182ca49693a-nova-metadata-tls-certs\") pod \"675005da-4197-468b-b62a-3182ca49693a\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.344654 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/675005da-4197-468b-b62a-3182ca49693a-combined-ca-bundle\") pod \"675005da-4197-468b-b62a-3182ca49693a\" (UID: \"675005da-4197-468b-b62a-3182ca49693a\") " Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.345174 4706 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/675005da-4197-468b-b62a-3182ca49693a-logs\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.349451 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/675005da-4197-468b-b62a-3182ca49693a-kube-api-access-zp9jw" (OuterVolumeSpecName: "kube-api-access-zp9jw") pod "675005da-4197-468b-b62a-3182ca49693a" (UID: "675005da-4197-468b-b62a-3182ca49693a"). InnerVolumeSpecName "kube-api-access-zp9jw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.376568 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/675005da-4197-468b-b62a-3182ca49693a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "675005da-4197-468b-b62a-3182ca49693a" (UID: "675005da-4197-468b-b62a-3182ca49693a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.378911 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/675005da-4197-468b-b62a-3182ca49693a-config-data" (OuterVolumeSpecName: "config-data") pod "675005da-4197-468b-b62a-3182ca49693a" (UID: "675005da-4197-468b-b62a-3182ca49693a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.401993 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/675005da-4197-468b-b62a-3182ca49693a-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "675005da-4197-468b-b62a-3182ca49693a" (UID: "675005da-4197-468b-b62a-3182ca49693a"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.446702 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/675005da-4197-468b-b62a-3182ca49693a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.446729 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/675005da-4197-468b-b62a-3182ca49693a-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.446739 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zp9jw\" (UniqueName: \"kubernetes.io/projected/675005da-4197-468b-b62a-3182ca49693a-kube-api-access-zp9jw\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.446750 4706 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/675005da-4197-468b-b62a-3182ca49693a-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.644845 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.645559 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"675005da-4197-468b-b62a-3182ca49693a","Type":"ContainerDied","Data":"3aa2bcdbd1133cf9ffd4d1066bfa6561f71989e01e2d6445a06abe6cbd241bf6"} Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.646926 4706 scope.go:117] "RemoveContainer" containerID="700d0ea159f1b3485e859b6cfb02dca77aadcc2e902e5595898899900c600f1b" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.647210 4706 generic.go:334] "Generic (PLEG): container finished" podID="766ae73e-0923-4626-bb2d-03a302967827" containerID="6b32958c8181d8904c04d0094cf45e09dace4b0fbd403f55be01a9666ec80973" exitCode=0 Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.647255 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"766ae73e-0923-4626-bb2d-03a302967827","Type":"ContainerDied","Data":"6b32958c8181d8904c04d0094cf45e09dace4b0fbd403f55be01a9666ec80973"} Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.681538 4706 scope.go:117] "RemoveContainer" containerID="5546afc44c4fd14cb62dc229d0144a43131b1e6f7da62bac1478b47d7239a8ad" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.689484 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.706154 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.719233 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Dec 06 05:50:11 crc kubenswrapper[4706]: E1206 05:50:11.719686 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7cced43-4fd9-4594-afd8-9c3e1ce7bb69" containerName="nova-manage" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.719708 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7cced43-4fd9-4594-afd8-9c3e1ce7bb69" containerName="nova-manage" Dec 06 05:50:11 crc kubenswrapper[4706]: E1206 05:50:11.719734 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="675005da-4197-468b-b62a-3182ca49693a" 
containerName="nova-metadata-metadata" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.719742 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="675005da-4197-468b-b62a-3182ca49693a" containerName="nova-metadata-metadata" Dec 06 05:50:11 crc kubenswrapper[4706]: E1206 05:50:11.719764 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="675005da-4197-468b-b62a-3182ca49693a" containerName="nova-metadata-log" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.719771 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="675005da-4197-468b-b62a-3182ca49693a" containerName="nova-metadata-log" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.719987 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="675005da-4197-468b-b62a-3182ca49693a" containerName="nova-metadata-log" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.720008 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="675005da-4197-468b-b62a-3182ca49693a" containerName="nova-metadata-metadata" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.720023 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7cced43-4fd9-4594-afd8-9c3e1ce7bb69" containerName="nova-manage" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.721436 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.723470 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.726227 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.741929 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.751477 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/784eb2e8-d56e-4523-86cf-b67f953db54d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"784eb2e8-d56e-4523-86cf-b67f953db54d\") " pod="openstack/nova-metadata-0" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.751699 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/784eb2e8-d56e-4523-86cf-b67f953db54d-config-data\") pod \"nova-metadata-0\" (UID: \"784eb2e8-d56e-4523-86cf-b67f953db54d\") " pod="openstack/nova-metadata-0" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.751915 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/784eb2e8-d56e-4523-86cf-b67f953db54d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"784eb2e8-d56e-4523-86cf-b67f953db54d\") " pod="openstack/nova-metadata-0" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.751967 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/784eb2e8-d56e-4523-86cf-b67f953db54d-logs\") pod \"nova-metadata-0\" (UID: \"784eb2e8-d56e-4523-86cf-b67f953db54d\") " pod="openstack/nova-metadata-0" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.752110 4706 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxq5z\" (UniqueName: \"kubernetes.io/projected/784eb2e8-d56e-4523-86cf-b67f953db54d-kube-api-access-dxq5z\") pod \"nova-metadata-0\" (UID: \"784eb2e8-d56e-4523-86cf-b67f953db54d\") " pod="openstack/nova-metadata-0" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.855380 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/784eb2e8-d56e-4523-86cf-b67f953db54d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"784eb2e8-d56e-4523-86cf-b67f953db54d\") " pod="openstack/nova-metadata-0" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.855465 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/784eb2e8-d56e-4523-86cf-b67f953db54d-config-data\") pod \"nova-metadata-0\" (UID: \"784eb2e8-d56e-4523-86cf-b67f953db54d\") " pod="openstack/nova-metadata-0" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.855510 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/784eb2e8-d56e-4523-86cf-b67f953db54d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"784eb2e8-d56e-4523-86cf-b67f953db54d\") " pod="openstack/nova-metadata-0" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.855525 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/784eb2e8-d56e-4523-86cf-b67f953db54d-logs\") pod \"nova-metadata-0\" (UID: \"784eb2e8-d56e-4523-86cf-b67f953db54d\") " pod="openstack/nova-metadata-0" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.855570 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxq5z\" (UniqueName: \"kubernetes.io/projected/784eb2e8-d56e-4523-86cf-b67f953db54d-kube-api-access-dxq5z\") pod \"nova-metadata-0\" (UID: \"784eb2e8-d56e-4523-86cf-b67f953db54d\") " pod="openstack/nova-metadata-0" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.856633 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/784eb2e8-d56e-4523-86cf-b67f953db54d-logs\") pod \"nova-metadata-0\" (UID: \"784eb2e8-d56e-4523-86cf-b67f953db54d\") " pod="openstack/nova-metadata-0" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.860556 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/784eb2e8-d56e-4523-86cf-b67f953db54d-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"784eb2e8-d56e-4523-86cf-b67f953db54d\") " pod="openstack/nova-metadata-0" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.863658 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/784eb2e8-d56e-4523-86cf-b67f953db54d-config-data\") pod \"nova-metadata-0\" (UID: \"784eb2e8-d56e-4523-86cf-b67f953db54d\") " pod="openstack/nova-metadata-0" Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.863760 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/784eb2e8-d56e-4523-86cf-b67f953db54d-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"784eb2e8-d56e-4523-86cf-b67f953db54d\") " pod="openstack/nova-metadata-0" Dec 06 05:50:11 crc 
Dec 06 05:50:11 crc kubenswrapper[4706]: I1206 05:50:11.873180 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxq5z\" (UniqueName: \"kubernetes.io/projected/784eb2e8-d56e-4523-86cf-b67f953db54d-kube-api-access-dxq5z\") pod \"nova-metadata-0\" (UID: \"784eb2e8-d56e-4523-86cf-b67f953db54d\") " pod="openstack/nova-metadata-0"
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.044923 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.046924 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="675005da-4197-468b-b62a-3182ca49693a" path="/var/lib/kubelet/pods/675005da-4197-468b-b62a-3182ca49693a/volumes"
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.206617 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.262400 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-internal-tls-certs\") pod \"766ae73e-0923-4626-bb2d-03a302967827\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") "
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.262572 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7n7fm\" (UniqueName: \"kubernetes.io/projected/766ae73e-0923-4626-bb2d-03a302967827-kube-api-access-7n7fm\") pod \"766ae73e-0923-4626-bb2d-03a302967827\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") "
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.262608 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-combined-ca-bundle\") pod \"766ae73e-0923-4626-bb2d-03a302967827\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") "
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.263284 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-public-tls-certs\") pod \"766ae73e-0923-4626-bb2d-03a302967827\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") "
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.263318 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-config-data\") pod \"766ae73e-0923-4626-bb2d-03a302967827\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") "
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.263358 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/766ae73e-0923-4626-bb2d-03a302967827-logs\") pod \"766ae73e-0923-4626-bb2d-03a302967827\" (UID: \"766ae73e-0923-4626-bb2d-03a302967827\") "
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.264018 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/766ae73e-0923-4626-bb2d-03a302967827-logs" (OuterVolumeSpecName: "logs") pod "766ae73e-0923-4626-bb2d-03a302967827" (UID: "766ae73e-0923-4626-bb2d-03a302967827"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.279999 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/766ae73e-0923-4626-bb2d-03a302967827-kube-api-access-7n7fm" (OuterVolumeSpecName: "kube-api-access-7n7fm") pod "766ae73e-0923-4626-bb2d-03a302967827" (UID: "766ae73e-0923-4626-bb2d-03a302967827"). InnerVolumeSpecName "kube-api-access-7n7fm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.294724 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "766ae73e-0923-4626-bb2d-03a302967827" (UID: "766ae73e-0923-4626-bb2d-03a302967827"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.299456 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-config-data" (OuterVolumeSpecName: "config-data") pod "766ae73e-0923-4626-bb2d-03a302967827" (UID: "766ae73e-0923-4626-bb2d-03a302967827"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.341997 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "766ae73e-0923-4626-bb2d-03a302967827" (UID: "766ae73e-0923-4626-bb2d-03a302967827"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.347004 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "766ae73e-0923-4626-bb2d-03a302967827" (UID: "766ae73e-0923-4626-bb2d-03a302967827"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.365981 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7n7fm\" (UniqueName: \"kubernetes.io/projected/766ae73e-0923-4626-bb2d-03a302967827-kube-api-access-7n7fm\") on node \"crc\" DevicePath \"\""
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.366005 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.366031 4706 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-public-tls-certs\") on node \"crc\" DevicePath \"\""
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.366068 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-config-data\") on node \"crc\" DevicePath \"\""
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.366086 4706 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/766ae73e-0923-4626-bb2d-03a302967827-logs\") on node \"crc\" DevicePath \"\""
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.366099 4706 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/766ae73e-0923-4626-bb2d-03a302967827-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.541633 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Dec 06 05:50:12 crc kubenswrapper[4706]: W1206 05:50:12.541932 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod784eb2e8_d56e_4523_86cf_b67f953db54d.slice/crio-0ba1cff12a54f61f077eaf7743bc91c5b69e16949b0de3e07066d405ccb51a50 WatchSource:0}: Error finding container 0ba1cff12a54f61f077eaf7743bc91c5b69e16949b0de3e07066d405ccb51a50: Status 404 returned error can't find the container with id 0ba1cff12a54f61f077eaf7743bc91c5b69e16949b0de3e07066d405ccb51a50
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.575292 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.658456 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"766ae73e-0923-4626-bb2d-03a302967827","Type":"ContainerDied","Data":"3135ad725dd427b97ac7955e3de3e43f9f2c66d3113dade5f34b3d2b8c9c028d"}
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.658480 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.658500 4706 scope.go:117] "RemoveContainer" containerID="6b32958c8181d8904c04d0094cf45e09dace4b0fbd403f55be01a9666ec80973"
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.665276 4706 generic.go:334] "Generic (PLEG): container finished" podID="0d69c3f0-9226-4083-87a6-69e589b0869b" containerID="ca11c3fb2b92bb31a0e718f4394cc1fdf4a597aaa755ae11d1deda9d579f1a37" exitCode=0
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.665311 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0d69c3f0-9226-4083-87a6-69e589b0869b","Type":"ContainerDied","Data":"ca11c3fb2b92bb31a0e718f4394cc1fdf4a597aaa755ae11d1deda9d579f1a37"}
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.665277 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.665337 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0d69c3f0-9226-4083-87a6-69e589b0869b","Type":"ContainerDied","Data":"9cebe9321928512d67539413a8d06c13497dcf065df8ac83d65c09e3d27d54e9"}
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.666447 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"784eb2e8-d56e-4523-86cf-b67f953db54d","Type":"ContainerStarted","Data":"0ba1cff12a54f61f077eaf7743bc91c5b69e16949b0de3e07066d405ccb51a50"}
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.672958 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d69c3f0-9226-4083-87a6-69e589b0869b-config-data\") pod \"0d69c3f0-9226-4083-87a6-69e589b0869b\" (UID: \"0d69c3f0-9226-4083-87a6-69e589b0869b\") "
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.673140 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2g2b4\" (UniqueName: \"kubernetes.io/projected/0d69c3f0-9226-4083-87a6-69e589b0869b-kube-api-access-2g2b4\") pod \"0d69c3f0-9226-4083-87a6-69e589b0869b\" (UID: \"0d69c3f0-9226-4083-87a6-69e589b0869b\") "
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.673189 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d69c3f0-9226-4083-87a6-69e589b0869b-combined-ca-bundle\") pod \"0d69c3f0-9226-4083-87a6-69e589b0869b\" (UID: \"0d69c3f0-9226-4083-87a6-69e589b0869b\") "
Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.678182 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d69c3f0-9226-4083-87a6-69e589b0869b-kube-api-access-2g2b4" (OuterVolumeSpecName: "kube-api-access-2g2b4") pod "0d69c3f0-9226-4083-87a6-69e589b0869b" (UID: "0d69c3f0-9226-4083-87a6-69e589b0869b"). InnerVolumeSpecName "kube-api-access-2g2b4". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.713642 4706 scope.go:117] "RemoveContainer" containerID="a39a16d81462778b976ac7ac5cbc5854397837dc526d5dfdfd916ad68a60163b" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.725448 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.732842 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d69c3f0-9226-4083-87a6-69e589b0869b-config-data" (OuterVolumeSpecName: "config-data") pod "0d69c3f0-9226-4083-87a6-69e589b0869b" (UID: "0d69c3f0-9226-4083-87a6-69e589b0869b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.736487 4706 scope.go:117] "RemoveContainer" containerID="ca11c3fb2b92bb31a0e718f4394cc1fdf4a597aaa755ae11d1deda9d579f1a37" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.755228 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.755283 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Dec 06 05:50:12 crc kubenswrapper[4706]: E1206 05:50:12.755699 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="766ae73e-0923-4626-bb2d-03a302967827" containerName="nova-api-log" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.755718 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="766ae73e-0923-4626-bb2d-03a302967827" containerName="nova-api-log" Dec 06 05:50:12 crc kubenswrapper[4706]: E1206 05:50:12.755733 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="766ae73e-0923-4626-bb2d-03a302967827" containerName="nova-api-api" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.755739 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="766ae73e-0923-4626-bb2d-03a302967827" containerName="nova-api-api" Dec 06 05:50:12 crc kubenswrapper[4706]: E1206 05:50:12.755780 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d69c3f0-9226-4083-87a6-69e589b0869b" containerName="nova-scheduler-scheduler" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.755786 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d69c3f0-9226-4083-87a6-69e589b0869b" containerName="nova-scheduler-scheduler" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.755996 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="766ae73e-0923-4626-bb2d-03a302967827" containerName="nova-api-log" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.756023 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d69c3f0-9226-4083-87a6-69e589b0869b" containerName="nova-scheduler-scheduler" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.756038 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="766ae73e-0923-4626-bb2d-03a302967827" containerName="nova-api-api" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.756981 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.759418 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.761914 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.762075 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.765661 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.776246 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/085d0127-557c-49a2-80f4-2a86fed685cc-config-data\") pod \"nova-api-0\" (UID: \"085d0127-557c-49a2-80f4-2a86fed685cc\") " pod="openstack/nova-api-0" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.776345 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/085d0127-557c-49a2-80f4-2a86fed685cc-public-tls-certs\") pod \"nova-api-0\" (UID: \"085d0127-557c-49a2-80f4-2a86fed685cc\") " pod="openstack/nova-api-0" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.776394 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/085d0127-557c-49a2-80f4-2a86fed685cc-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"085d0127-557c-49a2-80f4-2a86fed685cc\") " pod="openstack/nova-api-0" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.776454 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/085d0127-557c-49a2-80f4-2a86fed685cc-internal-tls-certs\") pod \"nova-api-0\" (UID: \"085d0127-557c-49a2-80f4-2a86fed685cc\") " pod="openstack/nova-api-0" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.776540 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/085d0127-557c-49a2-80f4-2a86fed685cc-logs\") pod \"nova-api-0\" (UID: \"085d0127-557c-49a2-80f4-2a86fed685cc\") " pod="openstack/nova-api-0" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.776564 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bq7zd\" (UniqueName: \"kubernetes.io/projected/085d0127-557c-49a2-80f4-2a86fed685cc-kube-api-access-bq7zd\") pod \"nova-api-0\" (UID: \"085d0127-557c-49a2-80f4-2a86fed685cc\") " pod="openstack/nova-api-0" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.776646 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2g2b4\" (UniqueName: \"kubernetes.io/projected/0d69c3f0-9226-4083-87a6-69e589b0869b-kube-api-access-2g2b4\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.776665 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d69c3f0-9226-4083-87a6-69e589b0869b-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.781781 4706 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d69c3f0-9226-4083-87a6-69e589b0869b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0d69c3f0-9226-4083-87a6-69e589b0869b" (UID: "0d69c3f0-9226-4083-87a6-69e589b0869b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.798519 4706 scope.go:117] "RemoveContainer" containerID="ca11c3fb2b92bb31a0e718f4394cc1fdf4a597aaa755ae11d1deda9d579f1a37" Dec 06 05:50:12 crc kubenswrapper[4706]: E1206 05:50:12.800815 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca11c3fb2b92bb31a0e718f4394cc1fdf4a597aaa755ae11d1deda9d579f1a37\": container with ID starting with ca11c3fb2b92bb31a0e718f4394cc1fdf4a597aaa755ae11d1deda9d579f1a37 not found: ID does not exist" containerID="ca11c3fb2b92bb31a0e718f4394cc1fdf4a597aaa755ae11d1deda9d579f1a37" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.800870 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca11c3fb2b92bb31a0e718f4394cc1fdf4a597aaa755ae11d1deda9d579f1a37"} err="failed to get container status \"ca11c3fb2b92bb31a0e718f4394cc1fdf4a597aaa755ae11d1deda9d579f1a37\": rpc error: code = NotFound desc = could not find container \"ca11c3fb2b92bb31a0e718f4394cc1fdf4a597aaa755ae11d1deda9d579f1a37\": container with ID starting with ca11c3fb2b92bb31a0e718f4394cc1fdf4a597aaa755ae11d1deda9d579f1a37 not found: ID does not exist" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.878452 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/085d0127-557c-49a2-80f4-2a86fed685cc-logs\") pod \"nova-api-0\" (UID: \"085d0127-557c-49a2-80f4-2a86fed685cc\") " pod="openstack/nova-api-0" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.878509 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bq7zd\" (UniqueName: \"kubernetes.io/projected/085d0127-557c-49a2-80f4-2a86fed685cc-kube-api-access-bq7zd\") pod \"nova-api-0\" (UID: \"085d0127-557c-49a2-80f4-2a86fed685cc\") " pod="openstack/nova-api-0" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.878553 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/085d0127-557c-49a2-80f4-2a86fed685cc-config-data\") pod \"nova-api-0\" (UID: \"085d0127-557c-49a2-80f4-2a86fed685cc\") " pod="openstack/nova-api-0" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.878618 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/085d0127-557c-49a2-80f4-2a86fed685cc-public-tls-certs\") pod \"nova-api-0\" (UID: \"085d0127-557c-49a2-80f4-2a86fed685cc\") " pod="openstack/nova-api-0" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.878651 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/085d0127-557c-49a2-80f4-2a86fed685cc-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"085d0127-557c-49a2-80f4-2a86fed685cc\") " pod="openstack/nova-api-0" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.878715 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/085d0127-557c-49a2-80f4-2a86fed685cc-internal-tls-certs\") pod \"nova-api-0\" (UID: \"085d0127-557c-49a2-80f4-2a86fed685cc\") " pod="openstack/nova-api-0" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.878778 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d69c3f0-9226-4083-87a6-69e589b0869b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.878960 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/085d0127-557c-49a2-80f4-2a86fed685cc-logs\") pod \"nova-api-0\" (UID: \"085d0127-557c-49a2-80f4-2a86fed685cc\") " pod="openstack/nova-api-0" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.882684 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/085d0127-557c-49a2-80f4-2a86fed685cc-internal-tls-certs\") pod \"nova-api-0\" (UID: \"085d0127-557c-49a2-80f4-2a86fed685cc\") " pod="openstack/nova-api-0" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.882845 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/085d0127-557c-49a2-80f4-2a86fed685cc-public-tls-certs\") pod \"nova-api-0\" (UID: \"085d0127-557c-49a2-80f4-2a86fed685cc\") " pod="openstack/nova-api-0" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.883747 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/085d0127-557c-49a2-80f4-2a86fed685cc-config-data\") pod \"nova-api-0\" (UID: \"085d0127-557c-49a2-80f4-2a86fed685cc\") " pod="openstack/nova-api-0" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.886331 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/085d0127-557c-49a2-80f4-2a86fed685cc-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"085d0127-557c-49a2-80f4-2a86fed685cc\") " pod="openstack/nova-api-0" Dec 06 05:50:12 crc kubenswrapper[4706]: I1206 05:50:12.896579 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bq7zd\" (UniqueName: \"kubernetes.io/projected/085d0127-557c-49a2-80f4-2a86fed685cc-kube-api-access-bq7zd\") pod \"nova-api-0\" (UID: \"085d0127-557c-49a2-80f4-2a86fed685cc\") " pod="openstack/nova-api-0" Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.051380 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.109902 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.131675 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.192899 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.198851 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.204736 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.220945 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.389320 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24bb9983-5fec-49b8-9cff-cb2c111af5b9-config-data\") pod \"nova-scheduler-0\" (UID: \"24bb9983-5fec-49b8-9cff-cb2c111af5b9\") " pod="openstack/nova-scheduler-0" Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.389371 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ws4jj\" (UniqueName: \"kubernetes.io/projected/24bb9983-5fec-49b8-9cff-cb2c111af5b9-kube-api-access-ws4jj\") pod \"nova-scheduler-0\" (UID: \"24bb9983-5fec-49b8-9cff-cb2c111af5b9\") " pod="openstack/nova-scheduler-0" Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.389504 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24bb9983-5fec-49b8-9cff-cb2c111af5b9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"24bb9983-5fec-49b8-9cff-cb2c111af5b9\") " pod="openstack/nova-scheduler-0" Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.491666 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24bb9983-5fec-49b8-9cff-cb2c111af5b9-config-data\") pod \"nova-scheduler-0\" (UID: \"24bb9983-5fec-49b8-9cff-cb2c111af5b9\") " pod="openstack/nova-scheduler-0" Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.491745 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ws4jj\" (UniqueName: \"kubernetes.io/projected/24bb9983-5fec-49b8-9cff-cb2c111af5b9-kube-api-access-ws4jj\") pod \"nova-scheduler-0\" (UID: \"24bb9983-5fec-49b8-9cff-cb2c111af5b9\") " pod="openstack/nova-scheduler-0" Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.491909 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24bb9983-5fec-49b8-9cff-cb2c111af5b9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"24bb9983-5fec-49b8-9cff-cb2c111af5b9\") " pod="openstack/nova-scheduler-0" Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.497870 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24bb9983-5fec-49b8-9cff-cb2c111af5b9-config-data\") pod \"nova-scheduler-0\" (UID: \"24bb9983-5fec-49b8-9cff-cb2c111af5b9\") " pod="openstack/nova-scheduler-0" Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.498037 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24bb9983-5fec-49b8-9cff-cb2c111af5b9-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"24bb9983-5fec-49b8-9cff-cb2c111af5b9\") " pod="openstack/nova-scheduler-0" Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.507434 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ws4jj\" (UniqueName: 
\"kubernetes.io/projected/24bb9983-5fec-49b8-9cff-cb2c111af5b9-kube-api-access-ws4jj\") pod \"nova-scheduler-0\" (UID: \"24bb9983-5fec-49b8-9cff-cb2c111af5b9\") " pod="openstack/nova-scheduler-0" Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.636997 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.663975 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Dec 06 05:50:13 crc kubenswrapper[4706]: W1206 05:50:13.672460 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod085d0127_557c_49a2_80f4_2a86fed685cc.slice/crio-08c0cebcf77ffcceb824977c1dba4dbf6cb6e6bf8dcd6af024af1bd2c25e00b7 WatchSource:0}: Error finding container 08c0cebcf77ffcceb824977c1dba4dbf6cb6e6bf8dcd6af024af1bd2c25e00b7: Status 404 returned error can't find the container with id 08c0cebcf77ffcceb824977c1dba4dbf6cb6e6bf8dcd6af024af1bd2c25e00b7 Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.683675 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"784eb2e8-d56e-4523-86cf-b67f953db54d","Type":"ContainerStarted","Data":"6b342a4b02c48375fa214b4a57dcf9468dfd01064dd22a668c064c2398218b8b"} Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.683711 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"784eb2e8-d56e-4523-86cf-b67f953db54d","Type":"ContainerStarted","Data":"47f9ba33c490a92abbb2d54a2279144f7a274f20a5a74277961031595e12c8e2"} Dec 06 05:50:13 crc kubenswrapper[4706]: I1206 05:50:13.942025 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Dec 06 05:50:13 crc kubenswrapper[4706]: W1206 05:50:13.950913 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod24bb9983_5fec_49b8_9cff_cb2c111af5b9.slice/crio-2de61fda55afbbe3c737683cc04720e29470d582cad1beba71f85a63601e2c60 WatchSource:0}: Error finding container 2de61fda55afbbe3c737683cc04720e29470d582cad1beba71f85a63601e2c60: Status 404 returned error can't find the container with id 2de61fda55afbbe3c737683cc04720e29470d582cad1beba71f85a63601e2c60 Dec 06 05:50:14 crc kubenswrapper[4706]: I1206 05:50:14.054812 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d69c3f0-9226-4083-87a6-69e589b0869b" path="/var/lib/kubelet/pods/0d69c3f0-9226-4083-87a6-69e589b0869b/volumes" Dec 06 05:50:14 crc kubenswrapper[4706]: I1206 05:50:14.055833 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="766ae73e-0923-4626-bb2d-03a302967827" path="/var/lib/kubelet/pods/766ae73e-0923-4626-bb2d-03a302967827/volumes" Dec 06 05:50:14 crc kubenswrapper[4706]: I1206 05:50:14.709348 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"24bb9983-5fec-49b8-9cff-cb2c111af5b9","Type":"ContainerStarted","Data":"02898f1294c889a9e46361e43bc81825bbabc422151440c256463302bdab8405"} Dec 06 05:50:14 crc kubenswrapper[4706]: I1206 05:50:14.709402 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"24bb9983-5fec-49b8-9cff-cb2c111af5b9","Type":"ContainerStarted","Data":"2de61fda55afbbe3c737683cc04720e29470d582cad1beba71f85a63601e2c60"} Dec 06 05:50:14 crc kubenswrapper[4706]: I1206 05:50:14.710879 4706 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"085d0127-557c-49a2-80f4-2a86fed685cc","Type":"ContainerStarted","Data":"6e5d182b8e2e46f21ea4b5043125e87461764abb4a658e72f4bc41d4248a2a0c"} Dec 06 05:50:14 crc kubenswrapper[4706]: I1206 05:50:14.710912 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"085d0127-557c-49a2-80f4-2a86fed685cc","Type":"ContainerStarted","Data":"08c0cebcf77ffcceb824977c1dba4dbf6cb6e6bf8dcd6af024af1bd2c25e00b7"} Dec 06 05:50:14 crc kubenswrapper[4706]: I1206 05:50:14.728715 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.728698648 podStartE2EDuration="1.728698648s" podCreationTimestamp="2025-12-06 05:50:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:50:14.72800553 +0000 UTC m=+1837.055829474" watchObservedRunningTime="2025-12-06 05:50:14.728698648 +0000 UTC m=+1837.056522592" Dec 06 05:50:14 crc kubenswrapper[4706]: I1206 05:50:14.751214 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.751197056 podStartE2EDuration="3.751197056s" podCreationTimestamp="2025-12-06 05:50:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:50:14.751062742 +0000 UTC m=+1837.078886686" watchObservedRunningTime="2025-12-06 05:50:14.751197056 +0000 UTC m=+1837.079021000" Dec 06 05:50:15 crc kubenswrapper[4706]: I1206 05:50:15.037468 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:50:15 crc kubenswrapper[4706]: E1206 05:50:15.038135 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:50:15 crc kubenswrapper[4706]: I1206 05:50:15.725008 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"085d0127-557c-49a2-80f4-2a86fed685cc","Type":"ContainerStarted","Data":"8c7bd2bf7a2ae741e40986657ef5cf6f13d5a7fdb1e67976b3cf98e7d615e242"} Dec 06 05:50:15 crc kubenswrapper[4706]: I1206 05:50:15.752998 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.752974278 podStartE2EDuration="3.752974278s" podCreationTimestamp="2025-12-06 05:50:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:50:15.748366643 +0000 UTC m=+1838.076190627" watchObservedRunningTime="2025-12-06 05:50:15.752974278 +0000 UTC m=+1838.080798252" Dec 06 05:50:17 crc kubenswrapper[4706]: I1206 05:50:17.045710 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 06 05:50:17 crc kubenswrapper[4706]: I1206 05:50:17.045758 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Dec 06 05:50:18 crc kubenswrapper[4706]: I1206 05:50:18.638063 4706 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Dec 06 05:50:22 crc kubenswrapper[4706]: I1206 05:50:22.052025 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 06 05:50:22 crc kubenswrapper[4706]: I1206 05:50:22.052609 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Dec 06 05:50:22 crc kubenswrapper[4706]: I1206 05:50:22.062237 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Dec 06 05:50:23 crc kubenswrapper[4706]: I1206 05:50:23.065271 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="784eb2e8-d56e-4523-86cf-b67f953db54d" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.202:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 06 05:50:23 crc kubenswrapper[4706]: I1206 05:50:23.065213 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="784eb2e8-d56e-4523-86cf-b67f953db54d" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.202:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 06 05:50:23 crc kubenswrapper[4706]: I1206 05:50:23.113199 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 06 05:50:23 crc kubenswrapper[4706]: I1206 05:50:23.113242 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Dec 06 05:50:23 crc kubenswrapper[4706]: I1206 05:50:23.637236 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Dec 06 05:50:23 crc kubenswrapper[4706]: I1206 05:50:23.665615 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Dec 06 05:50:23 crc kubenswrapper[4706]: I1206 05:50:23.826552 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Dec 06 05:50:24 crc kubenswrapper[4706]: I1206 05:50:24.129302 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="085d0127-557c-49a2-80f4-2a86fed685cc" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.203:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 06 05:50:24 crc kubenswrapper[4706]: I1206 05:50:24.129262 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="085d0127-557c-49a2-80f4-2a86fed685cc" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.203:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Dec 06 05:50:28 crc kubenswrapper[4706]: I1206 05:50:28.043390 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:50:28 crc kubenswrapper[4706]: E1206 05:50:28.044496 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" 
podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:50:32 crc kubenswrapper[4706]: I1206 05:50:32.050874 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 06 05:50:32 crc kubenswrapper[4706]: I1206 05:50:32.051732 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Dec 06 05:50:32 crc kubenswrapper[4706]: I1206 05:50:32.059157 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 06 05:50:32 crc kubenswrapper[4706]: I1206 05:50:32.891326 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Dec 06 05:50:33 crc kubenswrapper[4706]: I1206 05:50:33.119108 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 06 05:50:33 crc kubenswrapper[4706]: I1206 05:50:33.120331 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 06 05:50:33 crc kubenswrapper[4706]: I1206 05:50:33.121653 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Dec 06 05:50:33 crc kubenswrapper[4706]: I1206 05:50:33.125718 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 06 05:50:33 crc kubenswrapper[4706]: I1206 05:50:33.883012 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Dec 06 05:50:33 crc kubenswrapper[4706]: I1206 05:50:33.887856 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Dec 06 05:50:41 crc kubenswrapper[4706]: I1206 05:50:41.652929 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 06 05:50:42 crc kubenswrapper[4706]: I1206 05:50:42.496785 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 06 05:50:43 crc kubenswrapper[4706]: I1206 05:50:43.039009 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:50:43 crc kubenswrapper[4706]: E1206 05:50:43.039453 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:50:46 crc kubenswrapper[4706]: I1206 05:50:46.074658 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="bfd60e65-9bee-4772-bbd5-b6d64a5a225c" containerName="rabbitmq" containerID="cri-o://5969ecbb31882bd14416f895bd2e15ae1d88ac9c6fa0fe23318ae5bee33e8892" gracePeriod=604796 Dec 06 05:50:46 crc kubenswrapper[4706]: I1206 05:50:46.824252 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" containerName="rabbitmq" containerID="cri-o://03b7e3b2fa72f543f655f0cf67b89d244c143d11bda2a8d13e353c58416bffa8" gracePeriod=604796 Dec 06 05:50:50 crc kubenswrapper[4706]: I1206 05:50:50.798555 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" 
podUID="bfd60e65-9bee-4772-bbd5-b6d64a5a225c" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.94:5671: connect: connection refused" Dec 06 05:50:50 crc kubenswrapper[4706]: I1206 05:50:50.847474 4706 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.95:5671: connect: connection refused" Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.059011 4706 generic.go:334] "Generic (PLEG): container finished" podID="bfd60e65-9bee-4772-bbd5-b6d64a5a225c" containerID="5969ecbb31882bd14416f895bd2e15ae1d88ac9c6fa0fe23318ae5bee33e8892" exitCode=0 Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.059125 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"bfd60e65-9bee-4772-bbd5-b6d64a5a225c","Type":"ContainerDied","Data":"5969ecbb31882bd14416f895bd2e15ae1d88ac9c6fa0fe23318ae5bee33e8892"} Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.843568 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.981908 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-plugins-conf\") pod \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.982040 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-pod-info\") pod \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.982099 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-erlang-cookie\") pod \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.982144 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-server-conf\") pod \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.982175 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-erlang-cookie-secret\") pod \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.982314 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-94qhx\" (UniqueName: \"kubernetes.io/projected/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-kube-api-access-94qhx\") pod \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.982795 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "bfd60e65-9bee-4772-bbd5-b6d64a5a225c" (UID: "bfd60e65-9bee-4772-bbd5-b6d64a5a225c"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.982932 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "bfd60e65-9bee-4772-bbd5-b6d64a5a225c" (UID: "bfd60e65-9bee-4772-bbd5-b6d64a5a225c"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.982956 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-plugins\") pod \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.983136 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-tls\") pod \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.983179 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.983251 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-config-data\") pod \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.983269 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-confd\") pod \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\" (UID: \"bfd60e65-9bee-4772-bbd5-b6d64a5a225c\") " Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.983279 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "bfd60e65-9bee-4772-bbd5-b6d64a5a225c" (UID: "bfd60e65-9bee-4772-bbd5-b6d64a5a225c"). InnerVolumeSpecName "rabbitmq-plugins". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.985782 4706 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.985816 4706 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-plugins-conf\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.985830 4706 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.988127 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-pod-info" (OuterVolumeSpecName: "pod-info") pod "bfd60e65-9bee-4772-bbd5-b6d64a5a225c" (UID: "bfd60e65-9bee-4772-bbd5-b6d64a5a225c"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.988652 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-kube-api-access-94qhx" (OuterVolumeSpecName: "kube-api-access-94qhx") pod "bfd60e65-9bee-4772-bbd5-b6d64a5a225c" (UID: "bfd60e65-9bee-4772-bbd5-b6d64a5a225c"). InnerVolumeSpecName "kube-api-access-94qhx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.989121 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "persistence") pod "bfd60e65-9bee-4772-bbd5-b6d64a5a225c" (UID: "bfd60e65-9bee-4772-bbd5-b6d64a5a225c"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 06 05:50:53 crc kubenswrapper[4706]: I1206 05:50:53.992254 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "bfd60e65-9bee-4772-bbd5-b6d64a5a225c" (UID: "bfd60e65-9bee-4772-bbd5-b6d64a5a225c"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.000279 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "bfd60e65-9bee-4772-bbd5-b6d64a5a225c" (UID: "bfd60e65-9bee-4772-bbd5-b6d64a5a225c"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.019744 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-config-data" (OuterVolumeSpecName: "config-data") pod "bfd60e65-9bee-4772-bbd5-b6d64a5a225c" (UID: "bfd60e65-9bee-4772-bbd5-b6d64a5a225c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.037530 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:50:54 crc kubenswrapper[4706]: E1206 05:50:54.037963 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.055810 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-server-conf" (OuterVolumeSpecName: "server-conf") pod "bfd60e65-9bee-4772-bbd5-b6d64a5a225c" (UID: "bfd60e65-9bee-4772-bbd5-b6d64a5a225c"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.084762 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.087859 4706 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-pod-info\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.089482 4706 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-server-conf\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.089510 4706 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.089532 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-94qhx\" (UniqueName: \"kubernetes.io/projected/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-kube-api-access-94qhx\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.089543 4706 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.089561 4706 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.089571 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.132531 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "bfd60e65-9bee-4772-bbd5-b6d64a5a225c" (UID: "bfd60e65-9bee-4772-bbd5-b6d64a5a225c"). 
InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.139264 4706 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.164195 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"bfd60e65-9bee-4772-bbd5-b6d64a5a225c","Type":"ContainerDied","Data":"0b15f626a9411f3bced3566da1d506b9c72eb68f413c6ccf5a0cd47309ab5802"} Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.164257 4706 scope.go:117] "RemoveContainer" containerID="5969ecbb31882bd14416f895bd2e15ae1d88ac9c6fa0fe23318ae5bee33e8892" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.191347 4706 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.191401 4706 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/bfd60e65-9bee-4772-bbd5-b6d64a5a225c-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.287854 4706 scope.go:117] "RemoveContainer" containerID="59ea841a87bb87aa6d7b186eaa2155dbd28a5d718db5af4f41b422fa2c8ac0c7" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.417557 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.428555 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.447975 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Dec 06 05:50:54 crc kubenswrapper[4706]: E1206 05:50:54.448456 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfd60e65-9bee-4772-bbd5-b6d64a5a225c" containerName="setup-container" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.448477 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfd60e65-9bee-4772-bbd5-b6d64a5a225c" containerName="setup-container" Dec 06 05:50:54 crc kubenswrapper[4706]: E1206 05:50:54.448506 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bfd60e65-9bee-4772-bbd5-b6d64a5a225c" containerName="rabbitmq" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.448515 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="bfd60e65-9bee-4772-bbd5-b6d64a5a225c" containerName="rabbitmq" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.448747 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="bfd60e65-9bee-4772-bbd5-b6d64a5a225c" containerName="rabbitmq" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.449916 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.451957 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.455559 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.455578 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.455815 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.455947 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.455989 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.455958 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-l65q5" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.467030 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.596898 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6be686b8-8844-4721-8b68-cd8b4d338517-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.596948 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6be686b8-8844-4721-8b68-cd8b4d338517-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.596994 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.597300 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9mplb\" (UniqueName: \"kubernetes.io/projected/6be686b8-8844-4721-8b68-cd8b4d338517-kube-api-access-9mplb\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.597376 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6be686b8-8844-4721-8b68-cd8b4d338517-pod-info\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.597482 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: 
\"kubernetes.io/projected/6be686b8-8844-4721-8b68-cd8b4d338517-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.597512 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6be686b8-8844-4721-8b68-cd8b4d338517-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.597565 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6be686b8-8844-4721-8b68-cd8b4d338517-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.597659 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6be686b8-8844-4721-8b68-cd8b4d338517-server-conf\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.597699 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6be686b8-8844-4721-8b68-cd8b4d338517-config-data\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.597782 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6be686b8-8844-4721-8b68-cd8b4d338517-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.699299 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6be686b8-8844-4721-8b68-cd8b4d338517-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.699349 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6be686b8-8844-4721-8b68-cd8b4d338517-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.699383 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.699429 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9mplb\" (UniqueName: \"kubernetes.io/projected/6be686b8-8844-4721-8b68-cd8b4d338517-kube-api-access-9mplb\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " 
pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.699454 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6be686b8-8844-4721-8b68-cd8b4d338517-pod-info\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.699487 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/6be686b8-8844-4721-8b68-cd8b4d338517-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.699516 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6be686b8-8844-4721-8b68-cd8b4d338517-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.699553 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6be686b8-8844-4721-8b68-cd8b4d338517-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.699594 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6be686b8-8844-4721-8b68-cd8b4d338517-server-conf\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.699610 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6be686b8-8844-4721-8b68-cd8b4d338517-config-data\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.699641 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6be686b8-8844-4721-8b68-cd8b4d338517-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.700203 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/6be686b8-8844-4721-8b68-cd8b4d338517-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.700668 4706 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.701188 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: 
\"kubernetes.io/configmap/6be686b8-8844-4721-8b68-cd8b4d338517-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.701256 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/6be686b8-8844-4721-8b68-cd8b4d338517-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.701482 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6be686b8-8844-4721-8b68-cd8b4d338517-config-data\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.702014 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/6be686b8-8844-4721-8b68-cd8b4d338517-server-conf\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.705255 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/6be686b8-8844-4721-8b68-cd8b4d338517-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.705356 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/6be686b8-8844-4721-8b68-cd8b4d338517-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.705889 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/6be686b8-8844-4721-8b68-cd8b4d338517-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.707526 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/6be686b8-8844-4721-8b68-cd8b4d338517-pod-info\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.717028 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9mplb\" (UniqueName: \"kubernetes.io/projected/6be686b8-8844-4721-8b68-cd8b4d338517-kube-api-access-9mplb\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.747229 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"rabbitmq-server-0\" (UID: \"6be686b8-8844-4721-8b68-cd8b4d338517\") " pod="openstack/rabbitmq-server-0" Dec 06 05:50:54 crc kubenswrapper[4706]: I1206 05:50:54.771564 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.110612 4706 generic.go:334] "Generic (PLEG): container finished" podID="f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" containerID="03b7e3b2fa72f543f655f0cf67b89d244c143d11bda2a8d13e353c58416bffa8" exitCode=0 Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.110784 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e","Type":"ContainerDied","Data":"03b7e3b2fa72f543f655f0cf67b89d244c143d11bda2a8d13e353c58416bffa8"} Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.214456 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.263143 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.325798 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-pod-info\") pod \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.325844 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-plugins-conf\") pod \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.325878 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-tls\") pod \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.325907 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-confd\") pod \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.325955 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-plugins\") pod \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.325975 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-config-data\") pod \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.325998 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-erlang-cookie-secret\") pod \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.326028 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-erlang-cookie\") pod \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.326074 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.326099 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-57w24\" (UniqueName: \"kubernetes.io/projected/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-kube-api-access-57w24\") pod \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.326153 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-server-conf\") pod \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\" (UID: \"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e\") " Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.329144 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" (UID: "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.332525 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "persistence") pod "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" (UID: "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.333958 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" (UID: "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.335224 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-pod-info" (OuterVolumeSpecName: "pod-info") pod "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" (UID: "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.335935 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" (UID: "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e"). InnerVolumeSpecName "rabbitmq-erlang-cookie". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.346158 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-kube-api-access-57w24" (OuterVolumeSpecName: "kube-api-access-57w24") pod "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" (UID: "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e"). InnerVolumeSpecName "kube-api-access-57w24". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.346338 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" (UID: "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.355772 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" (UID: "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.387989 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-config-data" (OuterVolumeSpecName: "config-data") pod "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" (UID: "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.411630 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-server-conf" (OuterVolumeSpecName: "server-conf") pod "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" (UID: "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.428569 4706 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.428604 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.428613 4706 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.428623 4706 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.428649 4706 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.428659 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-57w24\" (UniqueName: \"kubernetes.io/projected/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-kube-api-access-57w24\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.428669 4706 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-server-conf\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.428677 4706 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-pod-info\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.428684 4706 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-plugins-conf\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.428693 4706 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.463815 4706 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.476339 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" (UID: "f16a0463-de95-4c8c-a1b5-d80e8a2ec59e"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.530411 4706 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:55 crc kubenswrapper[4706]: I1206 05:50:55.530442 4706 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.048680 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bfd60e65-9bee-4772-bbd5-b6d64a5a225c" path="/var/lib/kubelet/pods/bfd60e65-9bee-4772-bbd5-b6d64a5a225c/volumes" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.050195 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-67b789f86c-4h5hd"] Dec 06 05:50:56 crc kubenswrapper[4706]: E1206 05:50:56.056755 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" containerName="rabbitmq" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.056783 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" containerName="rabbitmq" Dec 06 05:50:56 crc kubenswrapper[4706]: E1206 05:50:56.056796 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" containerName="setup-container" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.056803 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" containerName="setup-container" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.057003 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" containerName="rabbitmq" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.058086 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67b789f86c-4h5hd"] Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.058195 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.060261 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.121849 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"f16a0463-de95-4c8c-a1b5-d80e8a2ec59e","Type":"ContainerDied","Data":"46d3335f068a1373fca9307feb78c1dbcdf94557955afa5e22413ab46f343bc9"} Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.121888 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.121905 4706 scope.go:117] "RemoveContainer" containerID="03b7e3b2fa72f543f655f0cf67b89d244c143d11bda2a8d13e353c58416bffa8" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.123775 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"6be686b8-8844-4721-8b68-cd8b4d338517","Type":"ContainerStarted","Data":"a7fe97ffc244e99bc887f5d63381e75b8f66273ca4bc16dff50386719e2a35dc"} Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.144223 4706 scope.go:117] "RemoveContainer" containerID="3af9c99284043d95d1d0e700f9d3e8775e1b02554878dda547e21c5836505241" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.173137 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.185197 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.208886 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.215722 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.223885 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.224154 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-gnvvx" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.224165 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.224341 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.224356 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.224509 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.224638 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.244188 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-dns-swift-storage-0\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.244268 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-dns-svc\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.244310 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-ovsdbserver-sb\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.244395 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drk9z\" (UniqueName: \"kubernetes.io/projected/98e703f4-7461-4659-956d-c0aed3c91c6f-kube-api-access-drk9z\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.244424 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-openstack-edpm-ipam\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.244513 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-ovsdbserver-nb\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.244574 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-config\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.246669 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.350510 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drk9z\" (UniqueName: \"kubernetes.io/projected/98e703f4-7461-4659-956d-c0aed3c91c6f-kube-api-access-drk9z\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.350586 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-openstack-edpm-ipam\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.350656 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/965d89e8-6db9-49d7-b516-ee4039b050eb-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.350688 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/965d89e8-6db9-49d7-b516-ee4039b050eb-rabbitmq-confd\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.350719 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/965d89e8-6db9-49d7-b516-ee4039b050eb-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.350749 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/965d89e8-6db9-49d7-b516-ee4039b050eb-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.350799 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffnnr\" (UniqueName: \"kubernetes.io/projected/965d89e8-6db9-49d7-b516-ee4039b050eb-kube-api-access-ffnnr\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.350847 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/965d89e8-6db9-49d7-b516-ee4039b050eb-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.350882 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-ovsdbserver-nb\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.350937 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/965d89e8-6db9-49d7-b516-ee4039b050eb-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.350985 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-config\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.351014 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/965d89e8-6db9-49d7-b516-ee4039b050eb-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.351098 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: 
\"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-dns-swift-storage-0\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.351127 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/965d89e8-6db9-49d7-b516-ee4039b050eb-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.351175 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-dns-svc\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.351208 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/965d89e8-6db9-49d7-b516-ee4039b050eb-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.351252 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.351290 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-ovsdbserver-sb\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.352596 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-ovsdbserver-sb\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.353072 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-openstack-edpm-ipam\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.356040 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-dns-svc\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.356802 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-dns-swift-storage-0\") pod 
\"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.358435 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-ovsdbserver-nb\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.364581 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-config\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.389307 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67b789f86c-4h5hd"] Dec 06 05:50:56 crc kubenswrapper[4706]: E1206 05:50:56.390094 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-drk9z], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" podUID="98e703f4-7461-4659-956d-c0aed3c91c6f" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.421157 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drk9z\" (UniqueName: \"kubernetes.io/projected/98e703f4-7461-4659-956d-c0aed3c91c6f-kube-api-access-drk9z\") pod \"dnsmasq-dns-67b789f86c-4h5hd\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.424355 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cb6ffcf87-5dz8j"] Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.426229 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.442612 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cb6ffcf87-5dz8j"] Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.453240 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/965d89e8-6db9-49d7-b516-ee4039b050eb-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.453292 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffnnr\" (UniqueName: \"kubernetes.io/projected/965d89e8-6db9-49d7-b516-ee4039b050eb-kube-api-access-ffnnr\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.453321 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/965d89e8-6db9-49d7-b516-ee4039b050eb-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.453358 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/965d89e8-6db9-49d7-b516-ee4039b050eb-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.453383 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/965d89e8-6db9-49d7-b516-ee4039b050eb-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.453405 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/965d89e8-6db9-49d7-b516-ee4039b050eb-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.453430 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/965d89e8-6db9-49d7-b516-ee4039b050eb-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.453449 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.453523 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/965d89e8-6db9-49d7-b516-ee4039b050eb-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.453542 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/965d89e8-6db9-49d7-b516-ee4039b050eb-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.453557 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/965d89e8-6db9-49d7-b516-ee4039b050eb-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.456733 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/965d89e8-6db9-49d7-b516-ee4039b050eb-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.456995 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/965d89e8-6db9-49d7-b516-ee4039b050eb-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.458386 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/965d89e8-6db9-49d7-b516-ee4039b050eb-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.458903 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/965d89e8-6db9-49d7-b516-ee4039b050eb-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.459979 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/965d89e8-6db9-49d7-b516-ee4039b050eb-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.460219 4706 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.485660 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/965d89e8-6db9-49d7-b516-ee4039b050eb-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.485703 4706 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/965d89e8-6db9-49d7-b516-ee4039b050eb-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.487970 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/965d89e8-6db9-49d7-b516-ee4039b050eb-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.491707 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/965d89e8-6db9-49d7-b516-ee4039b050eb-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.491918 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffnnr\" (UniqueName: \"kubernetes.io/projected/965d89e8-6db9-49d7-b516-ee4039b050eb-kube-api-access-ffnnr\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.497002 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"965d89e8-6db9-49d7-b516-ee4039b050eb\") " pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.537361 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.557224 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-ovsdbserver-nb\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.557301 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-config\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.557322 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qcxqn\" (UniqueName: \"kubernetes.io/projected/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-kube-api-access-qcxqn\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.557355 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-openstack-edpm-ipam\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.557392 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-dns-swift-storage-0\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.557430 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-ovsdbserver-sb\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.557452 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-dns-svc\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.659093 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-ovsdbserver-sb\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.659487 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-dns-svc\") pod 
\"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.659946 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-ovsdbserver-nb\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.660187 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-config\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.660252 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qcxqn\" (UniqueName: \"kubernetes.io/projected/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-kube-api-access-qcxqn\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.660415 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-openstack-edpm-ipam\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.660555 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-dns-svc\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.660560 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-dns-swift-storage-0\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.661313 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-ovsdbserver-sb\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.661532 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-dns-swift-storage-0\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.661589 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-config\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " 
pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.661695 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-ovsdbserver-nb\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.662951 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-openstack-edpm-ipam\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.684310 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qcxqn\" (UniqueName: \"kubernetes.io/projected/e7068fc5-ddf3-4a32-bf1a-803684a95dd3-kube-api-access-qcxqn\") pod \"dnsmasq-dns-cb6ffcf87-5dz8j\" (UID: \"e7068fc5-ddf3-4a32-bf1a-803684a95dd3\") " pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:56 crc kubenswrapper[4706]: I1206 05:50:56.932756 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.044830 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.139630 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"6be686b8-8844-4721-8b68-cd8b4d338517","Type":"ContainerStarted","Data":"308b9c3546a352ee6ee5a1bf8abd378bf7356db84f22666de0b988776456176e"} Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.149772 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.150424 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"965d89e8-6db9-49d7-b516-ee4039b050eb","Type":"ContainerStarted","Data":"509fd9457b1381da0d1da65ce0196d43fb20e005b6e77855aebe4c5c157a6c76"} Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.190138 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.275598 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-openstack-edpm-ipam\") pod \"98e703f4-7461-4659-956d-c0aed3c91c6f\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.275659 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-ovsdbserver-nb\") pod \"98e703f4-7461-4659-956d-c0aed3c91c6f\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.275682 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-ovsdbserver-sb\") pod \"98e703f4-7461-4659-956d-c0aed3c91c6f\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.275709 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-drk9z\" (UniqueName: \"kubernetes.io/projected/98e703f4-7461-4659-956d-c0aed3c91c6f-kube-api-access-drk9z\") pod \"98e703f4-7461-4659-956d-c0aed3c91c6f\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.275737 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-config\") pod \"98e703f4-7461-4659-956d-c0aed3c91c6f\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.275788 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-dns-swift-storage-0\") pod \"98e703f4-7461-4659-956d-c0aed3c91c6f\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.275896 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-dns-svc\") pod \"98e703f4-7461-4659-956d-c0aed3c91c6f\" (UID: \"98e703f4-7461-4659-956d-c0aed3c91c6f\") " Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.276334 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "98e703f4-7461-4659-956d-c0aed3c91c6f" (UID: "98e703f4-7461-4659-956d-c0aed3c91c6f"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.276373 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "98e703f4-7461-4659-956d-c0aed3c91c6f" (UID: "98e703f4-7461-4659-956d-c0aed3c91c6f"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.276524 4706 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.276690 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-config" (OuterVolumeSpecName: "config") pod "98e703f4-7461-4659-956d-c0aed3c91c6f" (UID: "98e703f4-7461-4659-956d-c0aed3c91c6f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.276821 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "98e703f4-7461-4659-956d-c0aed3c91c6f" (UID: "98e703f4-7461-4659-956d-c0aed3c91c6f"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.276810 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "98e703f4-7461-4659-956d-c0aed3c91c6f" (UID: "98e703f4-7461-4659-956d-c0aed3c91c6f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.277235 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "98e703f4-7461-4659-956d-c0aed3c91c6f" (UID: "98e703f4-7461-4659-956d-c0aed3c91c6f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.279319 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98e703f4-7461-4659-956d-c0aed3c91c6f-kube-api-access-drk9z" (OuterVolumeSpecName: "kube-api-access-drk9z") pod "98e703f4-7461-4659-956d-c0aed3c91c6f" (UID: "98e703f4-7461-4659-956d-c0aed3c91c6f"). InnerVolumeSpecName "kube-api-access-drk9z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.378080 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.378384 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.378394 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-drk9z\" (UniqueName: \"kubernetes.io/projected/98e703f4-7461-4659-956d-c0aed3c91c6f-kube-api-access-drk9z\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.378403 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.378412 4706 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.378423 4706 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/98e703f4-7461-4659-956d-c0aed3c91c6f-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 06 05:50:57 crc kubenswrapper[4706]: W1206 05:50:57.386841 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode7068fc5_ddf3_4a32_bf1a_803684a95dd3.slice/crio-9d470e2b5e1889cf673b655413ad781c2eb294a4aa1a37b77041011f7a9e5922 WatchSource:0}: Error finding container 9d470e2b5e1889cf673b655413ad781c2eb294a4aa1a37b77041011f7a9e5922: Status 404 returned error can't find the container with id 9d470e2b5e1889cf673b655413ad781c2eb294a4aa1a37b77041011f7a9e5922 Dec 06 05:50:57 crc kubenswrapper[4706]: I1206 05:50:57.394600 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cb6ffcf87-5dz8j"] Dec 06 05:50:58 crc kubenswrapper[4706]: I1206 05:50:58.047793 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f16a0463-de95-4c8c-a1b5-d80e8a2ec59e" path="/var/lib/kubelet/pods/f16a0463-de95-4c8c-a1b5-d80e8a2ec59e/volumes" Dec 06 05:50:58 crc kubenswrapper[4706]: I1206 05:50:58.197267 4706 generic.go:334] "Generic (PLEG): container finished" podID="e7068fc5-ddf3-4a32-bf1a-803684a95dd3" containerID="9b327c3b69136e7806f76794533a0879e530e52e10ad35882c7c00a2af2d080f" exitCode=0 Dec 06 05:50:58 crc kubenswrapper[4706]: I1206 05:50:58.197466 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" event={"ID":"e7068fc5-ddf3-4a32-bf1a-803684a95dd3","Type":"ContainerDied","Data":"9b327c3b69136e7806f76794533a0879e530e52e10ad35882c7c00a2af2d080f"} Dec 06 05:50:58 crc kubenswrapper[4706]: I1206 05:50:58.198097 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" event={"ID":"e7068fc5-ddf3-4a32-bf1a-803684a95dd3","Type":"ContainerStarted","Data":"9d470e2b5e1889cf673b655413ad781c2eb294a4aa1a37b77041011f7a9e5922"} Dec 06 05:50:58 crc kubenswrapper[4706]: I1206 
05:50:58.198165 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67b789f86c-4h5hd" Dec 06 05:50:58 crc kubenswrapper[4706]: I1206 05:50:58.273906 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67b789f86c-4h5hd"] Dec 06 05:50:58 crc kubenswrapper[4706]: I1206 05:50:58.282448 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-67b789f86c-4h5hd"] Dec 06 05:50:59 crc kubenswrapper[4706]: I1206 05:50:59.208867 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"965d89e8-6db9-49d7-b516-ee4039b050eb","Type":"ContainerStarted","Data":"bde85a93a025fb1c9186000836a2ee71cc549c263528af05edf6baa55b57c8be"} Dec 06 05:50:59 crc kubenswrapper[4706]: I1206 05:50:59.210961 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" event={"ID":"e7068fc5-ddf3-4a32-bf1a-803684a95dd3","Type":"ContainerStarted","Data":"852f0f0cfb110c046d45b76dfaba88846e743a10484e225e2b80c1c205039147"} Dec 06 05:51:00 crc kubenswrapper[4706]: I1206 05:51:00.048143 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98e703f4-7461-4659-956d-c0aed3c91c6f" path="/var/lib/kubelet/pods/98e703f4-7461-4659-956d-c0aed3c91c6f/volumes" Dec 06 05:51:00 crc kubenswrapper[4706]: I1206 05:51:00.220196 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:51:00 crc kubenswrapper[4706]: I1206 05:51:00.242338 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" podStartSLOduration=4.24232031 podStartE2EDuration="4.24232031s" podCreationTimestamp="2025-12-06 05:50:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:51:00.236541093 +0000 UTC m=+1882.564365047" watchObservedRunningTime="2025-12-06 05:51:00.24232031 +0000 UTC m=+1882.570144254" Dec 06 05:51:05 crc kubenswrapper[4706]: I1206 05:51:05.037091 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:51:05 crc kubenswrapper[4706]: E1206 05:51:05.038026 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:51:06 crc kubenswrapper[4706]: I1206 05:51:06.934219 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cb6ffcf87-5dz8j" Dec 06 05:51:07 crc kubenswrapper[4706]: I1206 05:51:07.010453 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59cf4bdb65-f4k9q"] Dec 06 05:51:07 crc kubenswrapper[4706]: I1206 05:51:07.010707 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" podUID="ad459e55-c5fb-42bc-8e86-af5e22355607" containerName="dnsmasq-dns" containerID="cri-o://1f3a99267e795dff0557aaf03d838400653c1ee98d08e931b61dea17094f4259" gracePeriod=10
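The records above show a readiness-gated replacement of the dnsmasq-dns pod: the new pod dnsmasq-dns-cb6ffcf87-5dz8j reports probe="readiness" status="ready" at 05:51:06, and only then does the API server issue a DELETE for the old pod dnsmasq-dns-59cf4bdb65-f4k9q, whose dnsmasq-dns container the kubelet kills with gracePeriod=10, i.e. SIGTERM first and SIGKILL only if the container is still running after ten seconds. A minimal sketch of issuing the same graceful deletion by hand, assuming kubeconfig access to the cluster that produced this log and using the official kubernetes Python client as an illustration (the pod and namespace names are taken from the records above):

    from kubernetes import client, config

    config.load_kube_config()
    v1 = client.CoreV1Api()

    # The API server marks the pod for deletion; the kubelet enforces the
    # grace period before escalating from SIGTERM to SIGKILL.
    v1.delete_namespaced_pod(
        name="dnsmasq-dns-59cf4bdb65-f4k9q",
        namespace="openstack",
        grace_period_seconds=10,
    )

The exitCode=0 in the record that follows indicates the container shut down cleanly within the grace period.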
Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.315485 4706 generic.go:334] "Generic (PLEG): container finished" podID="ad459e55-c5fb-42bc-8e86-af5e22355607" containerID="1f3a99267e795dff0557aaf03d838400653c1ee98d08e931b61dea17094f4259" exitCode=0 Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.315572 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" event={"ID":"ad459e55-c5fb-42bc-8e86-af5e22355607","Type":"ContainerDied","Data":"1f3a99267e795dff0557aaf03d838400653c1ee98d08e931b61dea17094f4259"} Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.316104 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" event={"ID":"ad459e55-c5fb-42bc-8e86-af5e22355607","Type":"ContainerDied","Data":"7bafdb5b59de68bf81052392d0087f9a8d2b37da9b3b7cd0d1a3a54ee4854ea7"} Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.316121 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7bafdb5b59de68bf81052392d0087f9a8d2b37da9b3b7cd0d1a3a54ee4854ea7" Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.322313 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.490936 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-config\") pod \"ad459e55-c5fb-42bc-8e86-af5e22355607\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.491017 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-dns-swift-storage-0\") pod \"ad459e55-c5fb-42bc-8e86-af5e22355607\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.491164 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-ovsdbserver-nb\") pod \"ad459e55-c5fb-42bc-8e86-af5e22355607\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.491381 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-ovsdbserver-sb\") pod \"ad459e55-c5fb-42bc-8e86-af5e22355607\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.491441 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zg8mk\" (UniqueName: \"kubernetes.io/projected/ad459e55-c5fb-42bc-8e86-af5e22355607-kube-api-access-zg8mk\") pod \"ad459e55-c5fb-42bc-8e86-af5e22355607\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.491481 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-dns-svc\") pod \"ad459e55-c5fb-42bc-8e86-af5e22355607\" (UID: \"ad459e55-c5fb-42bc-8e86-af5e22355607\") " Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.501012 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad459e55-c5fb-42bc-8e86-af5e22355607-kube-api-access-zg8mk"
(OuterVolumeSpecName: "kube-api-access-zg8mk") pod "ad459e55-c5fb-42bc-8e86-af5e22355607" (UID: "ad459e55-c5fb-42bc-8e86-af5e22355607"). InnerVolumeSpecName "kube-api-access-zg8mk". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.542758 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ad459e55-c5fb-42bc-8e86-af5e22355607" (UID: "ad459e55-c5fb-42bc-8e86-af5e22355607"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.547254 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-config" (OuterVolumeSpecName: "config") pod "ad459e55-c5fb-42bc-8e86-af5e22355607" (UID: "ad459e55-c5fb-42bc-8e86-af5e22355607"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.561235 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ad459e55-c5fb-42bc-8e86-af5e22355607" (UID: "ad459e55-c5fb-42bc-8e86-af5e22355607"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.576568 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ad459e55-c5fb-42bc-8e86-af5e22355607" (UID: "ad459e55-c5fb-42bc-8e86-af5e22355607"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.579069 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ad459e55-c5fb-42bc-8e86-af5e22355607" (UID: "ad459e55-c5fb-42bc-8e86-af5e22355607"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.610012 4706 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.610353 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.610491 4706 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.610631 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zg8mk\" (UniqueName: \"kubernetes.io/projected/ad459e55-c5fb-42bc-8e86-af5e22355607-kube-api-access-zg8mk\") on node \"crc\" DevicePath \"\"" Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.610758 4706 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-dns-svc\") on node \"crc\" DevicePath \"\"" Dec 06 05:51:08 crc kubenswrapper[4706]: I1206 05:51:08.610913 4706 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ad459e55-c5fb-42bc-8e86-af5e22355607-config\") on node \"crc\" DevicePath \"\"" Dec 06 05:51:09 crc kubenswrapper[4706]: I1206 05:51:09.323365 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59cf4bdb65-f4k9q" Dec 06 05:51:09 crc kubenswrapper[4706]: I1206 05:51:09.361062 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59cf4bdb65-f4k9q"] Dec 06 05:51:09 crc kubenswrapper[4706]: I1206 05:51:09.369384 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-59cf4bdb65-f4k9q"] Dec 06 05:51:10 crc kubenswrapper[4706]: I1206 05:51:10.051796 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad459e55-c5fb-42bc-8e86-af5e22355607" path="/var/lib/kubelet/pods/ad459e55-c5fb-42bc-8e86-af5e22355607/volumes" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.375766 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75"] Dec 06 05:51:15 crc kubenswrapper[4706]: E1206 05:51:15.376665 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad459e55-c5fb-42bc-8e86-af5e22355607" containerName="init" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.376677 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad459e55-c5fb-42bc-8e86-af5e22355607" containerName="init" Dec 06 05:51:15 crc kubenswrapper[4706]: E1206 05:51:15.376704 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad459e55-c5fb-42bc-8e86-af5e22355607" containerName="dnsmasq-dns" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.376711 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad459e55-c5fb-42bc-8e86-af5e22355607" containerName="dnsmasq-dns" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.376885 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad459e55-c5fb-42bc-8e86-af5e22355607" 
containerName="dnsmasq-dns" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.377499 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.379893 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.382555 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.382705 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.382709 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9hwl" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.393399 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75"] Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.436586 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7051aff0-e824-43eb-a501-3c02108f96ee-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75\" (UID: \"7051aff0-e824-43eb-a501-3c02108f96ee\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.436638 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhc9t\" (UniqueName: \"kubernetes.io/projected/7051aff0-e824-43eb-a501-3c02108f96ee-kube-api-access-hhc9t\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75\" (UID: \"7051aff0-e824-43eb-a501-3c02108f96ee\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.436660 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7051aff0-e824-43eb-a501-3c02108f96ee-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75\" (UID: \"7051aff0-e824-43eb-a501-3c02108f96ee\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.436796 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7051aff0-e824-43eb-a501-3c02108f96ee-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75\" (UID: \"7051aff0-e824-43eb-a501-3c02108f96ee\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.538127 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7051aff0-e824-43eb-a501-3c02108f96ee-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75\" (UID: \"7051aff0-e824-43eb-a501-3c02108f96ee\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.538184 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-hhc9t\" (UniqueName: \"kubernetes.io/projected/7051aff0-e824-43eb-a501-3c02108f96ee-kube-api-access-hhc9t\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75\" (UID: \"7051aff0-e824-43eb-a501-3c02108f96ee\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.538220 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7051aff0-e824-43eb-a501-3c02108f96ee-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75\" (UID: \"7051aff0-e824-43eb-a501-3c02108f96ee\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.538353 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7051aff0-e824-43eb-a501-3c02108f96ee-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75\" (UID: \"7051aff0-e824-43eb-a501-3c02108f96ee\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.544296 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7051aff0-e824-43eb-a501-3c02108f96ee-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75\" (UID: \"7051aff0-e824-43eb-a501-3c02108f96ee\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.544473 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7051aff0-e824-43eb-a501-3c02108f96ee-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75\" (UID: \"7051aff0-e824-43eb-a501-3c02108f96ee\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.544687 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7051aff0-e824-43eb-a501-3c02108f96ee-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75\" (UID: \"7051aff0-e824-43eb-a501-3c02108f96ee\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.553907 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhc9t\" (UniqueName: \"kubernetes.io/projected/7051aff0-e824-43eb-a501-3c02108f96ee-kube-api-access-hhc9t\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75\" (UID: \"7051aff0-e824-43eb-a501-3c02108f96ee\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" Dec 06 05:51:15 crc kubenswrapper[4706]: I1206 05:51:15.698792 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" Dec 06 05:51:16 crc kubenswrapper[4706]: I1206 05:51:16.244287 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75"] Dec 06 05:51:16 crc kubenswrapper[4706]: I1206 05:51:16.404559 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" event={"ID":"7051aff0-e824-43eb-a501-3c02108f96ee","Type":"ContainerStarted","Data":"4b2e90203baa5a194a3ffcb887705ffaa32e0ba16c717dd0e7d124bde3121562"} Dec 06 05:51:17 crc kubenswrapper[4706]: I1206 05:51:17.036866 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:51:17 crc kubenswrapper[4706]: E1206 05:51:17.038285 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:51:28 crc kubenswrapper[4706]: I1206 05:51:28.045842 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:51:28 crc kubenswrapper[4706]: E1206 05:51:28.046679 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:51:30 crc kubenswrapper[4706]: I1206 05:51:30.537317 4706 generic.go:334] "Generic (PLEG): container finished" podID="6be686b8-8844-4721-8b68-cd8b4d338517" containerID="308b9c3546a352ee6ee5a1bf8abd378bf7356db84f22666de0b988776456176e" exitCode=0 Dec 06 05:51:30 crc kubenswrapper[4706]: I1206 05:51:30.537391 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"6be686b8-8844-4721-8b68-cd8b4d338517","Type":"ContainerDied","Data":"308b9c3546a352ee6ee5a1bf8abd378bf7356db84f22666de0b988776456176e"} Dec 06 05:51:31 crc kubenswrapper[4706]: I1206 05:51:31.555541 4706 generic.go:334] "Generic (PLEG): container finished" podID="965d89e8-6db9-49d7-b516-ee4039b050eb" containerID="bde85a93a025fb1c9186000836a2ee71cc549c263528af05edf6baa55b57c8be" exitCode=0 Dec 06 05:51:31 crc kubenswrapper[4706]: I1206 05:51:31.555755 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"965d89e8-6db9-49d7-b516-ee4039b050eb","Type":"ContainerDied","Data":"bde85a93a025fb1c9186000836a2ee71cc549c263528af05edf6baa55b57c8be"} Dec 06 05:51:40 crc kubenswrapper[4706]: I1206 05:51:40.036700 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:51:40 crc kubenswrapper[4706]: E1206 05:51:40.037628 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:51:40 crc kubenswrapper[4706]: E1206 05:51:40.775475 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/openstack-k8s-operators/openstack-ansibleee-runner:1be4a21e16c214012ab35d75a83ce5b3c85f5ed2" Dec 06 05:51:40 crc kubenswrapper[4706]: E1206 05:51:40.775524 4706 kuberuntime_image.go:55] "Failed to pull image" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.rdoproject.org/openstack-k8s-operators/openstack-ansibleee-runner:1be4a21e16c214012ab35d75a83ce5b3c85f5ed2" Dec 06 05:51:40 crc kubenswrapper[4706]: E1206 05:51:40.775661 4706 kuberuntime_manager.go:1274] "Unhandled Error" err=< Dec 06 05:51:40 crc kubenswrapper[4706]: container &Container{Name:repo-setup-edpm-deployment-openstack-edpm-ipam,Image:quay.rdoproject.org/openstack-k8s-operators/openstack-ansibleee-runner:1be4a21e16c214012ab35d75a83ce5b3c85f5ed2,Command:[],Args:[ansible-runner run /runner -p playbook.yaml -i repo-setup-edpm-deployment-openstack-edpm-ipam],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:ANSIBLE_VERBOSITY,Value:2,ValueFrom:nil,},EnvVar{Name:RUNNER_PLAYBOOK,Value: Dec 06 05:51:40 crc kubenswrapper[4706]: - hosts: all Dec 06 05:51:40 crc kubenswrapper[4706]: strategy: linear Dec 06 05:51:40 crc kubenswrapper[4706]: tasks: Dec 06 05:51:40 crc kubenswrapper[4706]: - name: Enable podified-repos Dec 06 05:51:40 crc kubenswrapper[4706]: become: true Dec 06 05:51:40 crc kubenswrapper[4706]: ansible.builtin.shell: | Dec 06 05:51:40 crc kubenswrapper[4706]: set -euxo pipefail Dec 06 05:51:40 crc kubenswrapper[4706]: pushd /var/tmp Dec 06 05:51:40 crc kubenswrapper[4706]: curl -sL https://github.com/openstack-k8s-operators/repo-setup/archive/refs/heads/main.tar.gz | tar -xz Dec 06 05:51:40 crc kubenswrapper[4706]: pushd repo-setup-main Dec 06 05:51:40 crc kubenswrapper[4706]: python3 -m venv ./venv Dec 06 05:51:40 crc kubenswrapper[4706]: PBR_VERSION=0.0.0 ./venv/bin/pip install ./ Dec 06 05:51:40 crc kubenswrapper[4706]: ./venv/bin/repo-setup current-podified -b antelope Dec 06 05:51:40 crc kubenswrapper[4706]: popd Dec 06 05:51:40 crc kubenswrapper[4706]: rm -rf repo-setup-main Dec 06 05:51:40 crc kubenswrapper[4706]: Dec 06 05:51:40 crc kubenswrapper[4706]: Dec 06 05:51:40 crc kubenswrapper[4706]: ,ValueFrom:nil,},EnvVar{Name:RUNNER_EXTRA_VARS,Value: Dec 06 05:51:40 crc kubenswrapper[4706]: edpm_override_hosts: openstack-edpm-ipam Dec 06 05:51:40 crc kubenswrapper[4706]: edpm_service_type: repo-setup Dec 06 05:51:40 crc kubenswrapper[4706]: Dec 06 05:51:40 crc kubenswrapper[4706]: Dec 06 05:51:40 crc kubenswrapper[4706]: 
,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:repo-setup-combined-ca-bundle,ReadOnly:false,MountPath:/var/lib/openstack/cacerts/repo-setup,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/runner/env/ssh_key,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:inventory,ReadOnly:false,MountPath:/runner/inventory/hosts,SubPath:inventory,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hhc9t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:openstack-aee-default-env,},Optional:*true,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75_openstack(7051aff0-e824-43eb-a501-3c02108f96ee): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled Dec 06 05:51:40 crc kubenswrapper[4706]: > logger="UnhandledError" Dec 06 05:51:40 crc kubenswrapper[4706]: E1206 05:51:40.777239 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"repo-setup-edpm-deployment-openstack-edpm-ipam\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" podUID="7051aff0-e824-43eb-a501-3c02108f96ee" Dec 06 05:51:41 crc kubenswrapper[4706]: I1206 05:51:41.642492 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"965d89e8-6db9-49d7-b516-ee4039b050eb","Type":"ContainerStarted","Data":"c6cf2e453e97ee2aa7e1718dd77faba6a895d9cf9b7f1b7e0ca3c0f9000097f1"} Dec 06 05:51:41 crc kubenswrapper[4706]: I1206 05:51:41.642703 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:51:41 crc kubenswrapper[4706]: I1206 05:51:41.644973 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"6be686b8-8844-4721-8b68-cd8b4d338517","Type":"ContainerStarted","Data":"670e2556f1b37190d63753a240f9e4e06acc8e7ab9524ee05a1d3c73b0e9317e"} Dec 06 05:51:41 crc kubenswrapper[4706]: I1206 05:51:41.645400 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
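The pull of the openstack-ansibleee-runner image above was aborted mid-transfer ("copying config: context canceled"), so the kubelet dumps the full container spec as an UnhandledError and the pod worker records ErrImagePull; the next sync attempt, in the record that follows, is throttled to ImagePullBackOff, and the pull only succeeds on a later retry at 05:51:53. A minimal sketch for listing containers currently stuck in either state, assuming kubeconfig access and using the official kubernetes Python client as an illustration (the namespace is taken from the records above):

    from kubernetes import client, config

    config.load_kube_config()
    v1 = client.CoreV1Api()

    for pod in v1.list_namespaced_pod("openstack").items:
        for cs in pod.status.container_statuses or []:
            waiting = cs.state.waiting if cs.state else None
            # ErrImagePull is the immediate failure; ImagePullBackOff is the
            # exponential backoff the kubelet applies on subsequent syncs.
            if waiting and waiting.reason in ("ErrImagePull", "ImagePullBackOff"):
                print(pod.metadata.name, cs.name, waiting.reason)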
\\\"quay.rdoproject.org/openstack-k8s-operators/openstack-ansibleee-runner:1be4a21e16c214012ab35d75a83ce5b3c85f5ed2\\\"\"" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" podUID="7051aff0-e824-43eb-a501-3c02108f96ee" Dec 06 05:51:41 crc kubenswrapper[4706]: I1206 05:51:41.672672 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=45.672654692 podStartE2EDuration="45.672654692s" podCreationTimestamp="2025-12-06 05:50:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:51:41.666786533 +0000 UTC m=+1923.994610477" watchObservedRunningTime="2025-12-06 05:51:41.672654692 +0000 UTC m=+1924.000478636" Dec 06 05:51:41 crc kubenswrapper[4706]: I1206 05:51:41.695528 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=47.69551049 podStartE2EDuration="47.69551049s" podCreationTimestamp="2025-12-06 05:50:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 05:51:41.687902135 +0000 UTC m=+1924.015726079" watchObservedRunningTime="2025-12-06 05:51:41.69551049 +0000 UTC m=+1924.023334434" Dec 06 05:51:46 crc kubenswrapper[4706]: I1206 05:51:46.127764 4706 scope.go:117] "RemoveContainer" containerID="425ea330c8587bf3972d00f9fd843e2e23e46558268c656e08fcaddbc59d5808" Dec 06 05:51:46 crc kubenswrapper[4706]: I1206 05:51:46.169417 4706 scope.go:117] "RemoveContainer" containerID="0d5f10632e82e97d62d20ae61fde4c2d46e96e255197bc1c392ef5e3a6650508" Dec 06 05:51:46 crc kubenswrapper[4706]: I1206 05:51:46.216425 4706 scope.go:117] "RemoveContainer" containerID="7f3ee022e9a79da46433f655c4dae55bf7fa9d1fab50cb443202634fd1b1843e" Dec 06 05:51:52 crc kubenswrapper[4706]: I1206 05:51:52.036064 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:51:52 crc kubenswrapper[4706]: E1206 05:51:52.036844 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:51:53 crc kubenswrapper[4706]: I1206 05:51:53.204205 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 06 05:51:53 crc kubenswrapper[4706]: I1206 05:51:53.760779 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" event={"ID":"7051aff0-e824-43eb-a501-3c02108f96ee","Type":"ContainerStarted","Data":"ffd7f2acb21804da62df993bf8631ccb16fd127fdf0c189e5ca8b94a8af9799c"} Dec 06 05:51:53 crc kubenswrapper[4706]: I1206 05:51:53.783261 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" podStartSLOduration=1.837256779 podStartE2EDuration="38.783237671s" podCreationTimestamp="2025-12-06 05:51:15 +0000 UTC" firstStartedPulling="2025-12-06 05:51:16.255956528 +0000 UTC m=+1898.583780482" lastFinishedPulling="2025-12-06 05:51:53.20193743 +0000 UTC 
m=+1935.529761374" observedRunningTime="2025-12-06 05:51:53.775873302 +0000 UTC m=+1936.103697266" watchObservedRunningTime="2025-12-06 05:51:53.783237671 +0000 UTC m=+1936.111061615" Dec 06 05:51:54 crc kubenswrapper[4706]: I1206 05:51:54.774316 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Dec 06 05:51:56 crc kubenswrapper[4706]: I1206 05:51:56.540271 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Dec 06 05:52:04 crc kubenswrapper[4706]: I1206 05:52:04.037195 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:52:04 crc kubenswrapper[4706]: E1206 05:52:04.038085 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:52:16 crc kubenswrapper[4706]: I1206 05:52:16.036855 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:52:16 crc kubenswrapper[4706]: E1206 05:52:16.037751 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:52:22 crc kubenswrapper[4706]: I1206 05:52:22.068935 4706 generic.go:334] "Generic (PLEG): container finished" podID="7051aff0-e824-43eb-a501-3c02108f96ee" containerID="ffd7f2acb21804da62df993bf8631ccb16fd127fdf0c189e5ca8b94a8af9799c" exitCode=0 Dec 06 05:52:22 crc kubenswrapper[4706]: I1206 05:52:22.068991 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" event={"ID":"7051aff0-e824-43eb-a501-3c02108f96ee","Type":"ContainerDied","Data":"ffd7f2acb21804da62df993bf8631ccb16fd127fdf0c189e5ca8b94a8af9799c"} Dec 06 05:52:23 crc kubenswrapper[4706]: I1206 05:52:23.488638 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" Dec 06 05:52:23 crc kubenswrapper[4706]: I1206 05:52:23.636132 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7051aff0-e824-43eb-a501-3c02108f96ee-ssh-key\") pod \"7051aff0-e824-43eb-a501-3c02108f96ee\" (UID: \"7051aff0-e824-43eb-a501-3c02108f96ee\") " Dec 06 05:52:23 crc kubenswrapper[4706]: I1206 05:52:23.636352 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7051aff0-e824-43eb-a501-3c02108f96ee-repo-setup-combined-ca-bundle\") pod \"7051aff0-e824-43eb-a501-3c02108f96ee\" (UID: \"7051aff0-e824-43eb-a501-3c02108f96ee\") " Dec 06 05:52:23 crc kubenswrapper[4706]: I1206 05:52:23.636384 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hhc9t\" (UniqueName: \"kubernetes.io/projected/7051aff0-e824-43eb-a501-3c02108f96ee-kube-api-access-hhc9t\") pod \"7051aff0-e824-43eb-a501-3c02108f96ee\" (UID: \"7051aff0-e824-43eb-a501-3c02108f96ee\") " Dec 06 05:52:23 crc kubenswrapper[4706]: I1206 05:52:23.636484 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7051aff0-e824-43eb-a501-3c02108f96ee-inventory\") pod \"7051aff0-e824-43eb-a501-3c02108f96ee\" (UID: \"7051aff0-e824-43eb-a501-3c02108f96ee\") " Dec 06 05:52:23 crc kubenswrapper[4706]: I1206 05:52:23.642176 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7051aff0-e824-43eb-a501-3c02108f96ee-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "7051aff0-e824-43eb-a501-3c02108f96ee" (UID: "7051aff0-e824-43eb-a501-3c02108f96ee"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:52:23 crc kubenswrapper[4706]: I1206 05:52:23.643315 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7051aff0-e824-43eb-a501-3c02108f96ee-kube-api-access-hhc9t" (OuterVolumeSpecName: "kube-api-access-hhc9t") pod "7051aff0-e824-43eb-a501-3c02108f96ee" (UID: "7051aff0-e824-43eb-a501-3c02108f96ee"). InnerVolumeSpecName "kube-api-access-hhc9t". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:52:23 crc kubenswrapper[4706]: I1206 05:52:23.661969 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7051aff0-e824-43eb-a501-3c02108f96ee-inventory" (OuterVolumeSpecName: "inventory") pod "7051aff0-e824-43eb-a501-3c02108f96ee" (UID: "7051aff0-e824-43eb-a501-3c02108f96ee"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:52:23 crc kubenswrapper[4706]: I1206 05:52:23.686859 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7051aff0-e824-43eb-a501-3c02108f96ee-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7051aff0-e824-43eb-a501-3c02108f96ee" (UID: "7051aff0-e824-43eb-a501-3c02108f96ee"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:52:23 crc kubenswrapper[4706]: I1206 05:52:23.739270 4706 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7051aff0-e824-43eb-a501-3c02108f96ee-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:52:23 crc kubenswrapper[4706]: I1206 05:52:23.739318 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hhc9t\" (UniqueName: \"kubernetes.io/projected/7051aff0-e824-43eb-a501-3c02108f96ee-kube-api-access-hhc9t\") on node \"crc\" DevicePath \"\"" Dec 06 05:52:23 crc kubenswrapper[4706]: I1206 05:52:23.739331 4706 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7051aff0-e824-43eb-a501-3c02108f96ee-inventory\") on node \"crc\" DevicePath \"\"" Dec 06 05:52:23 crc kubenswrapper[4706]: I1206 05:52:23.739342 4706 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7051aff0-e824-43eb-a501-3c02108f96ee-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.090201 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" event={"ID":"7051aff0-e824-43eb-a501-3c02108f96ee","Type":"ContainerDied","Data":"4b2e90203baa5a194a3ffcb887705ffaa32e0ba16c717dd0e7d124bde3121562"} Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.090497 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4b2e90203baa5a194a3ffcb887705ffaa32e0ba16c717dd0e7d124bde3121562" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.090256 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.190424 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb"] Dec 06 05:52:24 crc kubenswrapper[4706]: E1206 05:52:24.190962 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7051aff0-e824-43eb-a501-3c02108f96ee" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.190981 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="7051aff0-e824-43eb-a501-3c02108f96ee" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.191215 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="7051aff0-e824-43eb-a501-3c02108f96ee" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.191887 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.194481 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.194689 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9hwl" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.195068 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.195218 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.217992 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb"] Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.349753 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0df1eee4-ea9f-4409-b17c-8b6b37985814-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-dpmdb\" (UID: \"0df1eee4-ea9f-4409-b17c-8b6b37985814\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.349820 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stlfs\" (UniqueName: \"kubernetes.io/projected/0df1eee4-ea9f-4409-b17c-8b6b37985814-kube-api-access-stlfs\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-dpmdb\" (UID: \"0df1eee4-ea9f-4409-b17c-8b6b37985814\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.349969 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0df1eee4-ea9f-4409-b17c-8b6b37985814-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-dpmdb\" (UID: \"0df1eee4-ea9f-4409-b17c-8b6b37985814\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.451891 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0df1eee4-ea9f-4409-b17c-8b6b37985814-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-dpmdb\" (UID: \"0df1eee4-ea9f-4409-b17c-8b6b37985814\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.451971 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stlfs\" (UniqueName: \"kubernetes.io/projected/0df1eee4-ea9f-4409-b17c-8b6b37985814-kube-api-access-stlfs\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-dpmdb\" (UID: \"0df1eee4-ea9f-4409-b17c-8b6b37985814\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.452130 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0df1eee4-ea9f-4409-b17c-8b6b37985814-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-dpmdb\" (UID: \"0df1eee4-ea9f-4409-b17c-8b6b37985814\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.455866 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0df1eee4-ea9f-4409-b17c-8b6b37985814-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-dpmdb\" (UID: \"0df1eee4-ea9f-4409-b17c-8b6b37985814\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.457825 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0df1eee4-ea9f-4409-b17c-8b6b37985814-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-dpmdb\" (UID: \"0df1eee4-ea9f-4409-b17c-8b6b37985814\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.471107 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stlfs\" (UniqueName: \"kubernetes.io/projected/0df1eee4-ea9f-4409-b17c-8b6b37985814-kube-api-access-stlfs\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-dpmdb\" (UID: \"0df1eee4-ea9f-4409-b17c-8b6b37985814\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb" Dec 06 05:52:24 crc kubenswrapper[4706]: I1206 05:52:24.563178 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb" Dec 06 05:52:25 crc kubenswrapper[4706]: I1206 05:52:25.083591 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb"] Dec 06 05:52:25 crc kubenswrapper[4706]: I1206 05:52:25.101440 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb" event={"ID":"0df1eee4-ea9f-4409-b17c-8b6b37985814","Type":"ContainerStarted","Data":"1fd4dae9e7f7f4e35e802523e0e119afa1e06f8984a7999e8dd9d9f0a86b0c32"} Dec 06 05:52:28 crc kubenswrapper[4706]: I1206 05:52:28.133937 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb" event={"ID":"0df1eee4-ea9f-4409-b17c-8b6b37985814","Type":"ContainerStarted","Data":"90772e2e55f7034ecf9ead19f3c599d1cdf5ee46bd327e0ee6e6632997f31839"} Dec 06 05:52:28 crc kubenswrapper[4706]: I1206 05:52:28.156349 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb" podStartSLOduration=2.983947624 podStartE2EDuration="4.156333042s" podCreationTimestamp="2025-12-06 05:52:24 +0000 UTC" firstStartedPulling="2025-12-06 05:52:25.087258244 +0000 UTC m=+1967.415082188" lastFinishedPulling="2025-12-06 05:52:26.259643662 +0000 UTC m=+1968.587467606" observedRunningTime="2025-12-06 05:52:28.148500451 +0000 UTC m=+1970.476324405" watchObservedRunningTime="2025-12-06 05:52:28.156333042 +0000 UTC m=+1970.484156986" Dec 06 05:52:30 crc kubenswrapper[4706]: I1206 05:52:30.036429 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:52:30 crc kubenswrapper[4706]: E1206 05:52:30.036970 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:52:30 crc kubenswrapper[4706]: I1206 05:52:30.149899 4706 generic.go:334] "Generic (PLEG): container finished" podID="0df1eee4-ea9f-4409-b17c-8b6b37985814" containerID="90772e2e55f7034ecf9ead19f3c599d1cdf5ee46bd327e0ee6e6632997f31839" exitCode=0 Dec 06 05:52:30 crc kubenswrapper[4706]: I1206 05:52:30.149941 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb" event={"ID":"0df1eee4-ea9f-4409-b17c-8b6b37985814","Type":"ContainerDied","Data":"90772e2e55f7034ecf9ead19f3c599d1cdf5ee46bd327e0ee6e6632997f31839"} Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:31.615217 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:31.688740 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0df1eee4-ea9f-4409-b17c-8b6b37985814-inventory\") pod \"0df1eee4-ea9f-4409-b17c-8b6b37985814\" (UID: \"0df1eee4-ea9f-4409-b17c-8b6b37985814\") " Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:31.688935 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0df1eee4-ea9f-4409-b17c-8b6b37985814-ssh-key\") pod \"0df1eee4-ea9f-4409-b17c-8b6b37985814\" (UID: \"0df1eee4-ea9f-4409-b17c-8b6b37985814\") " Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:31.689080 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-stlfs\" (UniqueName: \"kubernetes.io/projected/0df1eee4-ea9f-4409-b17c-8b6b37985814-kube-api-access-stlfs\") pod \"0df1eee4-ea9f-4409-b17c-8b6b37985814\" (UID: \"0df1eee4-ea9f-4409-b17c-8b6b37985814\") " Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:31.694257 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0df1eee4-ea9f-4409-b17c-8b6b37985814-kube-api-access-stlfs" (OuterVolumeSpecName: "kube-api-access-stlfs") pod "0df1eee4-ea9f-4409-b17c-8b6b37985814" (UID: "0df1eee4-ea9f-4409-b17c-8b6b37985814"). InnerVolumeSpecName "kube-api-access-stlfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:31.715592 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0df1eee4-ea9f-4409-b17c-8b6b37985814-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0df1eee4-ea9f-4409-b17c-8b6b37985814" (UID: "0df1eee4-ea9f-4409-b17c-8b6b37985814"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:31.716320 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0df1eee4-ea9f-4409-b17c-8b6b37985814-inventory" (OuterVolumeSpecName: "inventory") pod "0df1eee4-ea9f-4409-b17c-8b6b37985814" (UID: "0df1eee4-ea9f-4409-b17c-8b6b37985814"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:31.791904 4706 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0df1eee4-ea9f-4409-b17c-8b6b37985814-inventory\") on node \"crc\" DevicePath \"\"" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:31.791942 4706 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0df1eee4-ea9f-4409-b17c-8b6b37985814-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:31.791959 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-stlfs\" (UniqueName: \"kubernetes.io/projected/0df1eee4-ea9f-4409-b17c-8b6b37985814-kube-api-access-stlfs\") on node \"crc\" DevicePath \"\"" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.171119 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb" event={"ID":"0df1eee4-ea9f-4409-b17c-8b6b37985814","Type":"ContainerDied","Data":"1fd4dae9e7f7f4e35e802523e0e119afa1e06f8984a7999e8dd9d9f0a86b0c32"} Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.171159 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fd4dae9e7f7f4e35e802523e0e119afa1e06f8984a7999e8dd9d9f0a86b0c32" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.171227 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-dpmdb" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.240580 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv"] Dec 06 05:52:34 crc kubenswrapper[4706]: E1206 05:52:32.241034 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0df1eee4-ea9f-4409-b17c-8b6b37985814" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.241069 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="0df1eee4-ea9f-4409-b17c-8b6b37985814" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.241292 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="0df1eee4-ea9f-4409-b17c-8b6b37985814" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.241990 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.243626 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.243811 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.244014 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.244195 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9hwl" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.249446 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv"] Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.300928 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab55260b-0613-4be9-b0e2-e1470cdb018d-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv\" (UID: \"ab55260b-0613-4be9-b0e2-e1470cdb018d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.301129 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xllx\" (UniqueName: \"kubernetes.io/projected/ab55260b-0613-4be9-b0e2-e1470cdb018d-kube-api-access-2xllx\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv\" (UID: \"ab55260b-0613-4be9-b0e2-e1470cdb018d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.301170 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab55260b-0613-4be9-b0e2-e1470cdb018d-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv\" (UID: \"ab55260b-0613-4be9-b0e2-e1470cdb018d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.301199 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab55260b-0613-4be9-b0e2-e1470cdb018d-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv\" (UID: \"ab55260b-0613-4be9-b0e2-e1470cdb018d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.403062 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab55260b-0613-4be9-b0e2-e1470cdb018d-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv\" (UID: \"ab55260b-0613-4be9-b0e2-e1470cdb018d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.403183 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab55260b-0613-4be9-b0e2-e1470cdb018d-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv\" (UID: 
\"ab55260b-0613-4be9-b0e2-e1470cdb018d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.403313 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xllx\" (UniqueName: \"kubernetes.io/projected/ab55260b-0613-4be9-b0e2-e1470cdb018d-kube-api-access-2xllx\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv\" (UID: \"ab55260b-0613-4be9-b0e2-e1470cdb018d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.403348 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab55260b-0613-4be9-b0e2-e1470cdb018d-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv\" (UID: \"ab55260b-0613-4be9-b0e2-e1470cdb018d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.408406 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab55260b-0613-4be9-b0e2-e1470cdb018d-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv\" (UID: \"ab55260b-0613-4be9-b0e2-e1470cdb018d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.408679 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab55260b-0613-4be9-b0e2-e1470cdb018d-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv\" (UID: \"ab55260b-0613-4be9-b0e2-e1470cdb018d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.408795 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab55260b-0613-4be9-b0e2-e1470cdb018d-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv\" (UID: \"ab55260b-0613-4be9-b0e2-e1470cdb018d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.423686 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xllx\" (UniqueName: \"kubernetes.io/projected/ab55260b-0613-4be9-b0e2-e1470cdb018d-kube-api-access-2xllx\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv\" (UID: \"ab55260b-0613-4be9-b0e2-e1470cdb018d\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:32.560636 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" Dec 06 05:52:34 crc kubenswrapper[4706]: I1206 05:52:34.713318 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv"] Dec 06 05:52:35 crc kubenswrapper[4706]: I1206 05:52:35.203197 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" event={"ID":"ab55260b-0613-4be9-b0e2-e1470cdb018d","Type":"ContainerStarted","Data":"0c3839a7812f90a8279a8e2169d97cc52ff89ff0591b253def63565d8d056e73"} Dec 06 05:52:36 crc kubenswrapper[4706]: I1206 05:52:36.220662 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" event={"ID":"ab55260b-0613-4be9-b0e2-e1470cdb018d","Type":"ContainerStarted","Data":"4b0840657b5eba8e71dddb322ac56a4f7b29f26f7020ef29b5f6c682587d51b1"} Dec 06 05:52:36 crc kubenswrapper[4706]: I1206 05:52:36.245217 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" podStartSLOduration=3.822278346 podStartE2EDuration="4.245199552s" podCreationTimestamp="2025-12-06 05:52:32 +0000 UTC" firstStartedPulling="2025-12-06 05:52:34.726527752 +0000 UTC m=+1977.054351696" lastFinishedPulling="2025-12-06 05:52:35.149448958 +0000 UTC m=+1977.477272902" observedRunningTime="2025-12-06 05:52:36.237585626 +0000 UTC m=+1978.565409580" watchObservedRunningTime="2025-12-06 05:52:36.245199552 +0000 UTC m=+1978.573023496" Dec 06 05:52:41 crc kubenswrapper[4706]: I1206 05:52:41.036833 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:52:41 crc kubenswrapper[4706]: E1206 05:52:41.037670 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:52:54 crc kubenswrapper[4706]: I1206 05:52:54.035654 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:52:54 crc kubenswrapper[4706]: E1206 05:52:54.036570 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:53:05 crc kubenswrapper[4706]: I1206 05:53:05.036175 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:53:05 crc kubenswrapper[4706]: E1206 05:53:05.037182 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 05:53:20 crc kubenswrapper[4706]: I1206 05:53:20.036331 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:53:21 crc kubenswrapper[4706]: I1206 05:53:21.630172 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"802a2997475c34e3468ebd06845af6e76e5a777088e134200058afb534cc75ab"} Dec 06 05:54:23 crc kubenswrapper[4706]: I1206 05:54:23.184955 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-p55d9"] Dec 06 05:54:23 crc kubenswrapper[4706]: I1206 05:54:23.187538 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p55d9" Dec 06 05:54:23 crc kubenswrapper[4706]: I1206 05:54:23.196382 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p55d9"] Dec 06 05:54:23 crc kubenswrapper[4706]: I1206 05:54:23.330553 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8da3fb36-c484-49dd-a7fc-9e2e28f3163b-catalog-content\") pod \"redhat-marketplace-p55d9\" (UID: \"8da3fb36-c484-49dd-a7fc-9e2e28f3163b\") " pod="openshift-marketplace/redhat-marketplace-p55d9" Dec 06 05:54:23 crc kubenswrapper[4706]: I1206 05:54:23.330680 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8da3fb36-c484-49dd-a7fc-9e2e28f3163b-utilities\") pod \"redhat-marketplace-p55d9\" (UID: \"8da3fb36-c484-49dd-a7fc-9e2e28f3163b\") " pod="openshift-marketplace/redhat-marketplace-p55d9" Dec 06 05:54:23 crc kubenswrapper[4706]: I1206 05:54:23.330750 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ls8mm\" (UniqueName: \"kubernetes.io/projected/8da3fb36-c484-49dd-a7fc-9e2e28f3163b-kube-api-access-ls8mm\") pod \"redhat-marketplace-p55d9\" (UID: \"8da3fb36-c484-49dd-a7fc-9e2e28f3163b\") " pod="openshift-marketplace/redhat-marketplace-p55d9" Dec 06 05:54:23 crc kubenswrapper[4706]: I1206 05:54:23.434037 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8da3fb36-c484-49dd-a7fc-9e2e28f3163b-catalog-content\") pod \"redhat-marketplace-p55d9\" (UID: \"8da3fb36-c484-49dd-a7fc-9e2e28f3163b\") " pod="openshift-marketplace/redhat-marketplace-p55d9" Dec 06 05:54:23 crc kubenswrapper[4706]: I1206 05:54:23.434352 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8da3fb36-c484-49dd-a7fc-9e2e28f3163b-utilities\") pod \"redhat-marketplace-p55d9\" (UID: \"8da3fb36-c484-49dd-a7fc-9e2e28f3163b\") " pod="openshift-marketplace/redhat-marketplace-p55d9" Dec 06 05:54:23 crc kubenswrapper[4706]: I1206 05:54:23.434471 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ls8mm\" (UniqueName: \"kubernetes.io/projected/8da3fb36-c484-49dd-a7fc-9e2e28f3163b-kube-api-access-ls8mm\") pod \"redhat-marketplace-p55d9\" (UID: \"8da3fb36-c484-49dd-a7fc-9e2e28f3163b\") " 
pod="openshift-marketplace/redhat-marketplace-p55d9" Dec 06 05:54:23 crc kubenswrapper[4706]: I1206 05:54:23.435197 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8da3fb36-c484-49dd-a7fc-9e2e28f3163b-utilities\") pod \"redhat-marketplace-p55d9\" (UID: \"8da3fb36-c484-49dd-a7fc-9e2e28f3163b\") " pod="openshift-marketplace/redhat-marketplace-p55d9" Dec 06 05:54:23 crc kubenswrapper[4706]: I1206 05:54:23.434776 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8da3fb36-c484-49dd-a7fc-9e2e28f3163b-catalog-content\") pod \"redhat-marketplace-p55d9\" (UID: \"8da3fb36-c484-49dd-a7fc-9e2e28f3163b\") " pod="openshift-marketplace/redhat-marketplace-p55d9" Dec 06 05:54:23 crc kubenswrapper[4706]: I1206 05:54:23.455828 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ls8mm\" (UniqueName: \"kubernetes.io/projected/8da3fb36-c484-49dd-a7fc-9e2e28f3163b-kube-api-access-ls8mm\") pod \"redhat-marketplace-p55d9\" (UID: \"8da3fb36-c484-49dd-a7fc-9e2e28f3163b\") " pod="openshift-marketplace/redhat-marketplace-p55d9" Dec 06 05:54:23 crc kubenswrapper[4706]: I1206 05:54:23.509897 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p55d9" Dec 06 05:54:24 crc kubenswrapper[4706]: I1206 05:54:24.006318 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-p55d9"] Dec 06 05:54:24 crc kubenswrapper[4706]: W1206 05:54:24.013397 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8da3fb36_c484_49dd_a7fc_9e2e28f3163b.slice/crio-f8f49e4b6870f4d108591c1873b0c3e9221e425b2a5cd30e97b04e6ef96c78aa WatchSource:0}: Error finding container f8f49e4b6870f4d108591c1873b0c3e9221e425b2a5cd30e97b04e6ef96c78aa: Status 404 returned error can't find the container with id f8f49e4b6870f4d108591c1873b0c3e9221e425b2a5cd30e97b04e6ef96c78aa Dec 06 05:54:24 crc kubenswrapper[4706]: I1206 05:54:24.252135 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p55d9" event={"ID":"8da3fb36-c484-49dd-a7fc-9e2e28f3163b","Type":"ContainerStarted","Data":"f8f49e4b6870f4d108591c1873b0c3e9221e425b2a5cd30e97b04e6ef96c78aa"} Dec 06 05:54:25 crc kubenswrapper[4706]: I1206 05:54:25.264320 4706 generic.go:334] "Generic (PLEG): container finished" podID="8da3fb36-c484-49dd-a7fc-9e2e28f3163b" containerID="93750e1ab34c2b7f66f9ec8e202efb9675c824e94bac22e39cb1e89134de1f02" exitCode=0 Dec 06 05:54:25 crc kubenswrapper[4706]: I1206 05:54:25.264411 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p55d9" event={"ID":"8da3fb36-c484-49dd-a7fc-9e2e28f3163b","Type":"ContainerDied","Data":"93750e1ab34c2b7f66f9ec8e202efb9675c824e94bac22e39cb1e89134de1f02"} Dec 06 05:54:25 crc kubenswrapper[4706]: I1206 05:54:25.267135 4706 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 06 05:54:26 crc kubenswrapper[4706]: I1206 05:54:26.276699 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p55d9" event={"ID":"8da3fb36-c484-49dd-a7fc-9e2e28f3163b","Type":"ContainerStarted","Data":"bf194605e5d05253aefc29c8cc947eb3f4fdfd16935ea002122e3f6fe8276f09"} Dec 06 05:54:26 crc kubenswrapper[4706]: E1206 
05:54:26.437379 4706 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8da3fb36_c484_49dd_a7fc_9e2e28f3163b.slice/crio-bf194605e5d05253aefc29c8cc947eb3f4fdfd16935ea002122e3f6fe8276f09.scope\": RecentStats: unable to find data in memory cache]" Dec 06 05:54:27 crc kubenswrapper[4706]: I1206 05:54:27.287179 4706 generic.go:334] "Generic (PLEG): container finished" podID="8da3fb36-c484-49dd-a7fc-9e2e28f3163b" containerID="bf194605e5d05253aefc29c8cc947eb3f4fdfd16935ea002122e3f6fe8276f09" exitCode=0 Dec 06 05:54:27 crc kubenswrapper[4706]: I1206 05:54:27.287240 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p55d9" event={"ID":"8da3fb36-c484-49dd-a7fc-9e2e28f3163b","Type":"ContainerDied","Data":"bf194605e5d05253aefc29c8cc947eb3f4fdfd16935ea002122e3f6fe8276f09"} Dec 06 05:54:31 crc kubenswrapper[4706]: I1206 05:54:31.354065 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p55d9" event={"ID":"8da3fb36-c484-49dd-a7fc-9e2e28f3163b","Type":"ContainerStarted","Data":"4db27390b1e066eb2d47902cbff907dd67f1cfd9ecaf4179d45c45d64373156e"} Dec 06 05:54:31 crc kubenswrapper[4706]: I1206 05:54:31.376908 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-p55d9" podStartSLOduration=2.884165778 podStartE2EDuration="8.376888319s" podCreationTimestamp="2025-12-06 05:54:23 +0000 UTC" firstStartedPulling="2025-12-06 05:54:25.266907124 +0000 UTC m=+2087.594731068" lastFinishedPulling="2025-12-06 05:54:30.759629665 +0000 UTC m=+2093.087453609" observedRunningTime="2025-12-06 05:54:31.371920994 +0000 UTC m=+2093.699744938" watchObservedRunningTime="2025-12-06 05:54:31.376888319 +0000 UTC m=+2093.704712273" Dec 06 05:54:33 crc kubenswrapper[4706]: I1206 05:54:33.510983 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-p55d9" Dec 06 05:54:33 crc kubenswrapper[4706]: I1206 05:54:33.511570 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-p55d9" Dec 06 05:54:33 crc kubenswrapper[4706]: I1206 05:54:33.573853 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-p55d9" Dec 06 05:54:43 crc kubenswrapper[4706]: I1206 05:54:43.557568 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-p55d9" Dec 06 05:54:43 crc kubenswrapper[4706]: I1206 05:54:43.603152 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p55d9"] Dec 06 05:54:44 crc kubenswrapper[4706]: I1206 05:54:44.469010 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-p55d9" podUID="8da3fb36-c484-49dd-a7fc-9e2e28f3163b" containerName="registry-server" containerID="cri-o://4db27390b1e066eb2d47902cbff907dd67f1cfd9ecaf4179d45c45d64373156e" gracePeriod=2 Dec 06 05:54:46 crc kubenswrapper[4706]: I1206 05:54:46.506936 4706 generic.go:334] "Generic (PLEG): container finished" podID="8da3fb36-c484-49dd-a7fc-9e2e28f3163b" containerID="4db27390b1e066eb2d47902cbff907dd67f1cfd9ecaf4179d45c45d64373156e" exitCode=0 Dec 06 05:54:46 crc kubenswrapper[4706]: I1206 05:54:46.507032 4706 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p55d9" event={"ID":"8da3fb36-c484-49dd-a7fc-9e2e28f3163b","Type":"ContainerDied","Data":"4db27390b1e066eb2d47902cbff907dd67f1cfd9ecaf4179d45c45d64373156e"} Dec 06 05:54:47 crc kubenswrapper[4706]: I1206 05:54:47.463751 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p55d9" Dec 06 05:54:47 crc kubenswrapper[4706]: I1206 05:54:47.518344 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-p55d9" event={"ID":"8da3fb36-c484-49dd-a7fc-9e2e28f3163b","Type":"ContainerDied","Data":"f8f49e4b6870f4d108591c1873b0c3e9221e425b2a5cd30e97b04e6ef96c78aa"} Dec 06 05:54:47 crc kubenswrapper[4706]: I1206 05:54:47.518400 4706 scope.go:117] "RemoveContainer" containerID="4db27390b1e066eb2d47902cbff907dd67f1cfd9ecaf4179d45c45d64373156e" Dec 06 05:54:47 crc kubenswrapper[4706]: I1206 05:54:47.518415 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-p55d9" Dec 06 05:54:47 crc kubenswrapper[4706]: I1206 05:54:47.521815 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8da3fb36-c484-49dd-a7fc-9e2e28f3163b-utilities\") pod \"8da3fb36-c484-49dd-a7fc-9e2e28f3163b\" (UID: \"8da3fb36-c484-49dd-a7fc-9e2e28f3163b\") " Dec 06 05:54:47 crc kubenswrapper[4706]: I1206 05:54:47.522044 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ls8mm\" (UniqueName: \"kubernetes.io/projected/8da3fb36-c484-49dd-a7fc-9e2e28f3163b-kube-api-access-ls8mm\") pod \"8da3fb36-c484-49dd-a7fc-9e2e28f3163b\" (UID: \"8da3fb36-c484-49dd-a7fc-9e2e28f3163b\") " Dec 06 05:54:47 crc kubenswrapper[4706]: I1206 05:54:47.522215 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8da3fb36-c484-49dd-a7fc-9e2e28f3163b-catalog-content\") pod \"8da3fb36-c484-49dd-a7fc-9e2e28f3163b\" (UID: \"8da3fb36-c484-49dd-a7fc-9e2e28f3163b\") " Dec 06 05:54:47 crc kubenswrapper[4706]: I1206 05:54:47.522586 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8da3fb36-c484-49dd-a7fc-9e2e28f3163b-utilities" (OuterVolumeSpecName: "utilities") pod "8da3fb36-c484-49dd-a7fc-9e2e28f3163b" (UID: "8da3fb36-c484-49dd-a7fc-9e2e28f3163b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:54:47 crc kubenswrapper[4706]: I1206 05:54:47.522847 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8da3fb36-c484-49dd-a7fc-9e2e28f3163b-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 05:54:47 crc kubenswrapper[4706]: I1206 05:54:47.529445 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8da3fb36-c484-49dd-a7fc-9e2e28f3163b-kube-api-access-ls8mm" (OuterVolumeSpecName: "kube-api-access-ls8mm") pod "8da3fb36-c484-49dd-a7fc-9e2e28f3163b" (UID: "8da3fb36-c484-49dd-a7fc-9e2e28f3163b"). InnerVolumeSpecName "kube-api-access-ls8mm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:54:47 crc kubenswrapper[4706]: I1206 05:54:47.537416 4706 scope.go:117] "RemoveContainer" containerID="bf194605e5d05253aefc29c8cc947eb3f4fdfd16935ea002122e3f6fe8276f09" Dec 06 05:54:47 crc kubenswrapper[4706]: I1206 05:54:47.542230 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8da3fb36-c484-49dd-a7fc-9e2e28f3163b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8da3fb36-c484-49dd-a7fc-9e2e28f3163b" (UID: "8da3fb36-c484-49dd-a7fc-9e2e28f3163b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 05:54:47 crc kubenswrapper[4706]: I1206 05:54:47.586345 4706 scope.go:117] "RemoveContainer" containerID="93750e1ab34c2b7f66f9ec8e202efb9675c824e94bac22e39cb1e89134de1f02" Dec 06 05:54:47 crc kubenswrapper[4706]: I1206 05:54:47.624023 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ls8mm\" (UniqueName: \"kubernetes.io/projected/8da3fb36-c484-49dd-a7fc-9e2e28f3163b-kube-api-access-ls8mm\") on node \"crc\" DevicePath \"\"" Dec 06 05:54:47 crc kubenswrapper[4706]: I1206 05:54:47.624067 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8da3fb36-c484-49dd-a7fc-9e2e28f3163b-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 05:54:47 crc kubenswrapper[4706]: I1206 05:54:47.848039 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-p55d9"] Dec 06 05:54:47 crc kubenswrapper[4706]: I1206 05:54:47.856399 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-p55d9"] Dec 06 05:54:48 crc kubenswrapper[4706]: I1206 05:54:48.048576 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8da3fb36-c484-49dd-a7fc-9e2e28f3163b" path="/var/lib/kubelet/pods/8da3fb36-c484-49dd-a7fc-9e2e28f3163b/volumes" Dec 06 05:55:04 crc kubenswrapper[4706]: I1206 05:55:04.049431 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-2rfsr"] Dec 06 05:55:04 crc kubenswrapper[4706]: I1206 05:55:04.054918 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-d65f-account-create-update-bpbpn"] Dec 06 05:55:04 crc kubenswrapper[4706]: I1206 05:55:04.066281 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-2rfsr"] Dec 06 05:55:04 crc kubenswrapper[4706]: I1206 05:55:04.075441 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-d65f-account-create-update-bpbpn"] Dec 06 05:55:05 crc kubenswrapper[4706]: I1206 05:55:05.032653 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-g9s87"] Dec 06 05:55:05 crc kubenswrapper[4706]: I1206 05:55:05.043646 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-2nftm"] Dec 06 05:55:05 crc kubenswrapper[4706]: I1206 05:55:05.057485 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-3a0e-account-create-update-ctxbc"] Dec 06 05:55:05 crc kubenswrapper[4706]: I1206 05:55:05.066106 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-nwqqh"] Dec 06 05:55:05 crc kubenswrapper[4706]: I1206 05:55:05.074290 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-g9s87"] Dec 06 05:55:05 crc kubenswrapper[4706]: I1206 05:55:05.082774 4706 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-2nftm"] Dec 06 05:55:05 crc kubenswrapper[4706]: I1206 05:55:05.091759 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-nwqqh"] Dec 06 05:55:05 crc kubenswrapper[4706]: I1206 05:55:05.102230 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-3a0e-account-create-update-ctxbc"] Dec 06 05:55:06 crc kubenswrapper[4706]: I1206 05:55:06.031143 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-f39c-account-create-update-xpw6m"] Dec 06 05:55:06 crc kubenswrapper[4706]: I1206 05:55:06.048213 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f408368-233a-4ada-86e7-d4125b2a1bb2" path="/var/lib/kubelet/pods/4f408368-233a-4ada-86e7-d4125b2a1bb2/volumes" Dec 06 05:55:06 crc kubenswrapper[4706]: I1206 05:55:06.049319 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d9537ae-09b5-49d5-a0e5-6d8e6e992170" path="/var/lib/kubelet/pods/6d9537ae-09b5-49d5-a0e5-6d8e6e992170/volumes" Dec 06 05:55:06 crc kubenswrapper[4706]: I1206 05:55:06.050029 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f644703-222c-4d96-8473-8856ee25fb91" path="/var/lib/kubelet/pods/8f644703-222c-4d96-8473-8856ee25fb91/volumes" Dec 06 05:55:06 crc kubenswrapper[4706]: I1206 05:55:06.050726 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0b7d48d-3568-4c7a-909a-210e079a3a1b" path="/var/lib/kubelet/pods/d0b7d48d-3568-4c7a-909a-210e079a3a1b/volumes" Dec 06 05:55:06 crc kubenswrapper[4706]: I1206 05:55:06.052024 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed44107e-4d92-4895-961b-4cce9234319c" path="/var/lib/kubelet/pods/ed44107e-4d92-4895-961b-4cce9234319c/volumes" Dec 06 05:55:06 crc kubenswrapper[4706]: I1206 05:55:06.052724 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6351b3a-0675-4cf0-a1dd-fe6d80cef630" path="/var/lib/kubelet/pods/f6351b3a-0675-4cf0-a1dd-fe6d80cef630/volumes" Dec 06 05:55:06 crc kubenswrapper[4706]: I1206 05:55:06.053374 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-245b-account-create-update-fr24l"] Dec 06 05:55:06 crc kubenswrapper[4706]: I1206 05:55:06.053410 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-245b-account-create-update-fr24l"] Dec 06 05:55:06 crc kubenswrapper[4706]: I1206 05:55:06.057978 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-f39c-account-create-update-xpw6m"] Dec 06 05:55:07 crc kubenswrapper[4706]: I1206 05:55:07.029647 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-ab7c-account-create-update-knbmr"] Dec 06 05:55:07 crc kubenswrapper[4706]: I1206 05:55:07.038147 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-tnbqb"] Dec 06 05:55:07 crc kubenswrapper[4706]: I1206 05:55:07.047338 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-tnbqb"] Dec 06 05:55:07 crc kubenswrapper[4706]: I1206 05:55:07.055362 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-ab7c-account-create-update-knbmr"] Dec 06 05:55:08 crc kubenswrapper[4706]: I1206 05:55:08.029004 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-bd4e-account-create-update-g6ws8"] Dec 06 05:55:08 crc kubenswrapper[4706]: I1206 05:55:08.050265 4706 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65da32e7-79d5-4b2a-937c-8890711c77f4" path="/var/lib/kubelet/pods/65da32e7-79d5-4b2a-937c-8890711c77f4/volumes" Dec 06 05:55:08 crc kubenswrapper[4706]: I1206 05:55:08.050953 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ebc347a-c311-41c2-bed4-0fcd22e26342" path="/var/lib/kubelet/pods/6ebc347a-c311-41c2-bed4-0fcd22e26342/volumes" Dec 06 05:55:08 crc kubenswrapper[4706]: I1206 05:55:08.051561 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1854690-b0e0-4052-95a6-6951260cdb0b" path="/var/lib/kubelet/pods/c1854690-b0e0-4052-95a6-6951260cdb0b/volumes" Dec 06 05:55:08 crc kubenswrapper[4706]: I1206 05:55:08.052438 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8b901a2-272b-4021-b8fc-f2e0051e68ce" path="/var/lib/kubelet/pods/e8b901a2-272b-4021-b8fc-f2e0051e68ce/volumes" Dec 06 05:55:08 crc kubenswrapper[4706]: I1206 05:55:08.053658 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-ks7jq"] Dec 06 05:55:08 crc kubenswrapper[4706]: I1206 05:55:08.057706 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-bd4e-account-create-update-g6ws8"] Dec 06 05:55:08 crc kubenswrapper[4706]: I1206 05:55:08.066338 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-ks7jq"] Dec 06 05:55:10 crc kubenswrapper[4706]: I1206 05:55:10.047193 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="336ef851-faf5-4c3d-ad1f-316af4dedd9a" path="/var/lib/kubelet/pods/336ef851-faf5-4c3d-ad1f-316af4dedd9a/volumes" Dec 06 05:55:10 crc kubenswrapper[4706]: I1206 05:55:10.048181 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e0f3a5c-0b70-43c6-9b1e-5729f502488e" path="/var/lib/kubelet/pods/5e0f3a5c-0b70-43c6-9b1e-5729f502488e/volumes" Dec 06 05:55:35 crc kubenswrapper[4706]: I1206 05:55:35.961451 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:55:35 crc kubenswrapper[4706]: I1206 05:55:35.962027 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:55:39 crc kubenswrapper[4706]: I1206 05:55:39.040919 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-97dzf"] Dec 06 05:55:39 crc kubenswrapper[4706]: I1206 05:55:39.049449 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-97dzf"] Dec 06 05:55:40 crc kubenswrapper[4706]: I1206 05:55:40.049249 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406" path="/var/lib/kubelet/pods/e8a4f7bd-9f4d-4f4f-94e4-97b26f1f7406/volumes" Dec 06 05:55:46 crc kubenswrapper[4706]: I1206 05:55:46.408505 4706 scope.go:117] "RemoveContainer" containerID="9b0529fd61421d18bdbda5094f00317f5ecb5173b66ff1211f913dd36fa63b73" Dec 06 05:55:46 crc kubenswrapper[4706]: I1206 05:55:46.438736 4706 scope.go:117] "RemoveContainer" 
containerID="f04742d6b45408f5cdc0a29d85709adfd76649cd14446123eabeacffc7dbcfb6" Dec 06 05:55:46 crc kubenswrapper[4706]: I1206 05:55:46.484421 4706 scope.go:117] "RemoveContainer" containerID="7372b11b0d793e0c4ed3badda996c3d9e958a6bbd1443ee7f493a37a14ef57d6" Dec 06 05:55:46 crc kubenswrapper[4706]: I1206 05:55:46.544137 4706 scope.go:117] "RemoveContainer" containerID="824f2d7eacdae14d064a6787d0e8a685686da802e900559bf915c1c4f03c0cad" Dec 06 05:55:46 crc kubenswrapper[4706]: I1206 05:55:46.583839 4706 scope.go:117] "RemoveContainer" containerID="31c8e56e7f09c4baba93092443668f315c5ddfad6153dc36d306f07a63b30c43" Dec 06 05:55:46 crc kubenswrapper[4706]: I1206 05:55:46.644011 4706 scope.go:117] "RemoveContainer" containerID="b9cde94861c04f85797115def145b09aee430cb3659742748e6d0636b337746c" Dec 06 05:55:46 crc kubenswrapper[4706]: I1206 05:55:46.692879 4706 scope.go:117] "RemoveContainer" containerID="b22222290f9507501338cc9487939041cdd504374d0236cb6493aadca78433e6" Dec 06 05:55:46 crc kubenswrapper[4706]: I1206 05:55:46.739111 4706 scope.go:117] "RemoveContainer" containerID="e330223da4a0f81d4de8b3096784be3e084112807ae22f2d3bff09d68c40af7f" Dec 06 05:55:46 crc kubenswrapper[4706]: I1206 05:55:46.760368 4706 scope.go:117] "RemoveContainer" containerID="bdc8b764068139ac803c9b9f0f609fb64cf6246f8b3bdab9309e44ff0058e233" Dec 06 05:55:46 crc kubenswrapper[4706]: I1206 05:55:46.799040 4706 scope.go:117] "RemoveContainer" containerID="aaa55450163d9bee941bd72c27cd9273c2c582af776937dab02384a3670e1ab0" Dec 06 05:55:46 crc kubenswrapper[4706]: I1206 05:55:46.879625 4706 scope.go:117] "RemoveContainer" containerID="40400c374beb6242eb0b519fbd1c5e73b581aabc02097a6118d3beebc0532375" Dec 06 05:55:46 crc kubenswrapper[4706]: I1206 05:55:46.907351 4706 scope.go:117] "RemoveContainer" containerID="1b84902badaf8dc9269c5bff4c84f4bf4adfbc7a769092875c7c660e3d3f202c" Dec 06 05:55:46 crc kubenswrapper[4706]: I1206 05:55:46.935262 4706 scope.go:117] "RemoveContainer" containerID="757c5f7569f581dcd7aa8beeecddc19ffc2346ec6e6957fdc9b95860d1141a7a" Dec 06 05:56:05 crc kubenswrapper[4706]: I1206 05:56:05.962280 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:56:05 crc kubenswrapper[4706]: I1206 05:56:05.963471 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:56:06 crc kubenswrapper[4706]: I1206 05:56:06.241176 4706 generic.go:334] "Generic (PLEG): container finished" podID="ab55260b-0613-4be9-b0e2-e1470cdb018d" containerID="4b0840657b5eba8e71dddb322ac56a4f7b29f26f7020ef29b5f6c682587d51b1" exitCode=0 Dec 06 05:56:06 crc kubenswrapper[4706]: I1206 05:56:06.241224 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" event={"ID":"ab55260b-0613-4be9-b0e2-e1470cdb018d","Type":"ContainerDied","Data":"4b0840657b5eba8e71dddb322ac56a4f7b29f26f7020ef29b5f6c682587d51b1"} Dec 06 05:56:07 crc kubenswrapper[4706]: I1206 05:56:07.633338 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" Dec 06 05:56:07 crc kubenswrapper[4706]: I1206 05:56:07.787507 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab55260b-0613-4be9-b0e2-e1470cdb018d-ssh-key\") pod \"ab55260b-0613-4be9-b0e2-e1470cdb018d\" (UID: \"ab55260b-0613-4be9-b0e2-e1470cdb018d\") " Dec 06 05:56:07 crc kubenswrapper[4706]: I1206 05:56:07.787578 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab55260b-0613-4be9-b0e2-e1470cdb018d-bootstrap-combined-ca-bundle\") pod \"ab55260b-0613-4be9-b0e2-e1470cdb018d\" (UID: \"ab55260b-0613-4be9-b0e2-e1470cdb018d\") " Dec 06 05:56:07 crc kubenswrapper[4706]: I1206 05:56:07.787625 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab55260b-0613-4be9-b0e2-e1470cdb018d-inventory\") pod \"ab55260b-0613-4be9-b0e2-e1470cdb018d\" (UID: \"ab55260b-0613-4be9-b0e2-e1470cdb018d\") " Dec 06 05:56:07 crc kubenswrapper[4706]: I1206 05:56:07.787713 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2xllx\" (UniqueName: \"kubernetes.io/projected/ab55260b-0613-4be9-b0e2-e1470cdb018d-kube-api-access-2xllx\") pod \"ab55260b-0613-4be9-b0e2-e1470cdb018d\" (UID: \"ab55260b-0613-4be9-b0e2-e1470cdb018d\") " Dec 06 05:56:07 crc kubenswrapper[4706]: I1206 05:56:07.794506 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab55260b-0613-4be9-b0e2-e1470cdb018d-kube-api-access-2xllx" (OuterVolumeSpecName: "kube-api-access-2xllx") pod "ab55260b-0613-4be9-b0e2-e1470cdb018d" (UID: "ab55260b-0613-4be9-b0e2-e1470cdb018d"). InnerVolumeSpecName "kube-api-access-2xllx". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:56:07 crc kubenswrapper[4706]: I1206 05:56:07.794889 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab55260b-0613-4be9-b0e2-e1470cdb018d-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "ab55260b-0613-4be9-b0e2-e1470cdb018d" (UID: "ab55260b-0613-4be9-b0e2-e1470cdb018d"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:56:07 crc kubenswrapper[4706]: I1206 05:56:07.817937 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab55260b-0613-4be9-b0e2-e1470cdb018d-inventory" (OuterVolumeSpecName: "inventory") pod "ab55260b-0613-4be9-b0e2-e1470cdb018d" (UID: "ab55260b-0613-4be9-b0e2-e1470cdb018d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:56:07 crc kubenswrapper[4706]: I1206 05:56:07.822128 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ab55260b-0613-4be9-b0e2-e1470cdb018d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ab55260b-0613-4be9-b0e2-e1470cdb018d" (UID: "ab55260b-0613-4be9-b0e2-e1470cdb018d"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:56:07 crc kubenswrapper[4706]: I1206 05:56:07.890141 4706 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ab55260b-0613-4be9-b0e2-e1470cdb018d-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 06 05:56:07 crc kubenswrapper[4706]: I1206 05:56:07.890432 4706 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab55260b-0613-4be9-b0e2-e1470cdb018d-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 05:56:07 crc kubenswrapper[4706]: I1206 05:56:07.890451 4706 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ab55260b-0613-4be9-b0e2-e1470cdb018d-inventory\") on node \"crc\" DevicePath \"\"" Dec 06 05:56:07 crc kubenswrapper[4706]: I1206 05:56:07.890462 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2xllx\" (UniqueName: \"kubernetes.io/projected/ab55260b-0613-4be9-b0e2-e1470cdb018d-kube-api-access-2xllx\") on node \"crc\" DevicePath \"\"" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.261128 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" event={"ID":"ab55260b-0613-4be9-b0e2-e1470cdb018d","Type":"ContainerDied","Data":"0c3839a7812f90a8279a8e2169d97cc52ff89ff0591b253def63565d8d056e73"} Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.261169 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0c3839a7812f90a8279a8e2169d97cc52ff89ff0591b253def63565d8d056e73" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.261222 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.343828 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x"] Dec 06 05:56:08 crc kubenswrapper[4706]: E1206 05:56:08.344242 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8da3fb36-c484-49dd-a7fc-9e2e28f3163b" containerName="extract-content" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.344264 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="8da3fb36-c484-49dd-a7fc-9e2e28f3163b" containerName="extract-content" Dec 06 05:56:08 crc kubenswrapper[4706]: E1206 05:56:08.344288 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab55260b-0613-4be9-b0e2-e1470cdb018d" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.344295 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab55260b-0613-4be9-b0e2-e1470cdb018d" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 06 05:56:08 crc kubenswrapper[4706]: E1206 05:56:08.344313 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8da3fb36-c484-49dd-a7fc-9e2e28f3163b" containerName="registry-server" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.344322 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="8da3fb36-c484-49dd-a7fc-9e2e28f3163b" containerName="registry-server" Dec 06 05:56:08 crc kubenswrapper[4706]: E1206 05:56:08.344345 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8da3fb36-c484-49dd-a7fc-9e2e28f3163b" containerName="extract-utilities" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.344352 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="8da3fb36-c484-49dd-a7fc-9e2e28f3163b" containerName="extract-utilities" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.344592 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab55260b-0613-4be9-b0e2-e1470cdb018d" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.344618 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="8da3fb36-c484-49dd-a7fc-9e2e28f3163b" containerName="registry-server" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.347288 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.349643 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.350033 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.350184 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9hwl" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.351779 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.362721 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x"] Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.501367 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/582f8518-3c87-496d-b057-b2f66658a731-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x\" (UID: \"582f8518-3c87-496d-b057-b2f66658a731\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.501534 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9c77s\" (UniqueName: \"kubernetes.io/projected/582f8518-3c87-496d-b057-b2f66658a731-kube-api-access-9c77s\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x\" (UID: \"582f8518-3c87-496d-b057-b2f66658a731\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.501578 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/582f8518-3c87-496d-b057-b2f66658a731-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x\" (UID: \"582f8518-3c87-496d-b057-b2f66658a731\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.603193 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/582f8518-3c87-496d-b057-b2f66658a731-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x\" (UID: \"582f8518-3c87-496d-b057-b2f66658a731\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.603293 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/582f8518-3c87-496d-b057-b2f66658a731-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x\" (UID: \"582f8518-3c87-496d-b057-b2f66658a731\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.603394 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9c77s\" (UniqueName: \"kubernetes.io/projected/582f8518-3c87-496d-b057-b2f66658a731-kube-api-access-9c77s\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x\" (UID: \"582f8518-3c87-496d-b057-b2f66658a731\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.607592 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/582f8518-3c87-496d-b057-b2f66658a731-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x\" (UID: \"582f8518-3c87-496d-b057-b2f66658a731\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.609936 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/582f8518-3c87-496d-b057-b2f66658a731-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x\" (UID: \"582f8518-3c87-496d-b057-b2f66658a731\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.625298 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9c77s\" (UniqueName: \"kubernetes.io/projected/582f8518-3c87-496d-b057-b2f66658a731-kube-api-access-9c77s\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x\" (UID: \"582f8518-3c87-496d-b057-b2f66658a731\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x" Dec 06 05:56:08 crc kubenswrapper[4706]: I1206 05:56:08.669840 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x" Dec 06 05:56:09 crc kubenswrapper[4706]: I1206 05:56:09.232796 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x"] Dec 06 05:56:09 crc kubenswrapper[4706]: I1206 05:56:09.276275 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x" event={"ID":"582f8518-3c87-496d-b057-b2f66658a731","Type":"ContainerStarted","Data":"62fca0a570e18bddda23d1a29bf62ef6d7c187ed613bedcbe485ec8f9022c38e"} Dec 06 05:56:10 crc kubenswrapper[4706]: I1206 05:56:10.286394 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x" event={"ID":"582f8518-3c87-496d-b057-b2f66658a731","Type":"ContainerStarted","Data":"f107c4c1a92968531c1a6256c9a1ab5a1de16e914d058be1d3196a9143d52393"} Dec 06 05:56:11 crc kubenswrapper[4706]: I1206 05:56:11.322342 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x" podStartSLOduration=2.587284682 podStartE2EDuration="3.322320284s" podCreationTimestamp="2025-12-06 05:56:08 +0000 UTC" firstStartedPulling="2025-12-06 05:56:09.24140808 +0000 UTC m=+2191.569232024" lastFinishedPulling="2025-12-06 05:56:09.976443682 +0000 UTC m=+2192.304267626" observedRunningTime="2025-12-06 05:56:11.313935157 +0000 UTC m=+2193.641759101" watchObservedRunningTime="2025-12-06 05:56:11.322320284 +0000 UTC m=+2193.650144248" Dec 06 05:56:31 crc kubenswrapper[4706]: I1206 05:56:31.203708 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5nx52"] Dec 06 05:56:31 crc kubenswrapper[4706]: I1206 05:56:31.208009 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5nx52" Dec 06 05:56:31 crc kubenswrapper[4706]: I1206 05:56:31.243790 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5nx52"] Dec 06 05:56:31 crc kubenswrapper[4706]: I1206 05:56:31.354563 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bf72222-ee0b-41a2-877e-1bd5c83b392a-utilities\") pod \"community-operators-5nx52\" (UID: \"4bf72222-ee0b-41a2-877e-1bd5c83b392a\") " pod="openshift-marketplace/community-operators-5nx52" Dec 06 05:56:31 crc kubenswrapper[4706]: I1206 05:56:31.354684 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rblsn\" (UniqueName: \"kubernetes.io/projected/4bf72222-ee0b-41a2-877e-1bd5c83b392a-kube-api-access-rblsn\") pod \"community-operators-5nx52\" (UID: \"4bf72222-ee0b-41a2-877e-1bd5c83b392a\") " pod="openshift-marketplace/community-operators-5nx52" Dec 06 05:56:31 crc kubenswrapper[4706]: I1206 05:56:31.354739 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bf72222-ee0b-41a2-877e-1bd5c83b392a-catalog-content\") pod \"community-operators-5nx52\" (UID: \"4bf72222-ee0b-41a2-877e-1bd5c83b392a\") " pod="openshift-marketplace/community-operators-5nx52" Dec 06 05:56:31 crc kubenswrapper[4706]: I1206 05:56:31.457205 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bf72222-ee0b-41a2-877e-1bd5c83b392a-utilities\") pod \"community-operators-5nx52\" (UID: \"4bf72222-ee0b-41a2-877e-1bd5c83b392a\") " pod="openshift-marketplace/community-operators-5nx52" Dec 06 05:56:31 crc kubenswrapper[4706]: I1206 05:56:31.457313 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rblsn\" (UniqueName: \"kubernetes.io/projected/4bf72222-ee0b-41a2-877e-1bd5c83b392a-kube-api-access-rblsn\") pod \"community-operators-5nx52\" (UID: \"4bf72222-ee0b-41a2-877e-1bd5c83b392a\") " pod="openshift-marketplace/community-operators-5nx52" Dec 06 05:56:31 crc kubenswrapper[4706]: I1206 05:56:31.457391 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bf72222-ee0b-41a2-877e-1bd5c83b392a-catalog-content\") pod \"community-operators-5nx52\" (UID: \"4bf72222-ee0b-41a2-877e-1bd5c83b392a\") " pod="openshift-marketplace/community-operators-5nx52" Dec 06 05:56:31 crc kubenswrapper[4706]: I1206 05:56:31.457819 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4bf72222-ee0b-41a2-877e-1bd5c83b392a-utilities\") pod \"community-operators-5nx52\" (UID: \"4bf72222-ee0b-41a2-877e-1bd5c83b392a\") " pod="openshift-marketplace/community-operators-5nx52" Dec 06 05:56:31 crc kubenswrapper[4706]: I1206 05:56:31.457931 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4bf72222-ee0b-41a2-877e-1bd5c83b392a-catalog-content\") pod \"community-operators-5nx52\" (UID: \"4bf72222-ee0b-41a2-877e-1bd5c83b392a\") " pod="openshift-marketplace/community-operators-5nx52" Dec 06 05:56:31 crc kubenswrapper[4706]: I1206 05:56:31.482323 4706 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rblsn\" (UniqueName: \"kubernetes.io/projected/4bf72222-ee0b-41a2-877e-1bd5c83b392a-kube-api-access-rblsn\") pod \"community-operators-5nx52\" (UID: \"4bf72222-ee0b-41a2-877e-1bd5c83b392a\") " pod="openshift-marketplace/community-operators-5nx52" Dec 06 05:56:31 crc kubenswrapper[4706]: I1206 05:56:31.542146 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5nx52" Dec 06 05:56:32 crc kubenswrapper[4706]: I1206 05:56:32.089176 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5nx52"] Dec 06 05:56:32 crc kubenswrapper[4706]: I1206 05:56:32.502075 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5nx52" event={"ID":"4bf72222-ee0b-41a2-877e-1bd5c83b392a","Type":"ContainerStarted","Data":"18f6c8ec863efce6a43103b6e1009975437702f8fbd6da2bcaf2a88e39bf14e5"} Dec 06 05:56:33 crc kubenswrapper[4706]: I1206 05:56:33.511873 4706 generic.go:334] "Generic (PLEG): container finished" podID="4bf72222-ee0b-41a2-877e-1bd5c83b392a" containerID="fe920b1fd6480227dd4df61de2edf6c934202d6c89776f2eb6e053843d92f521" exitCode=0 Dec 06 05:56:33 crc kubenswrapper[4706]: I1206 05:56:33.512075 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5nx52" event={"ID":"4bf72222-ee0b-41a2-877e-1bd5c83b392a","Type":"ContainerDied","Data":"fe920b1fd6480227dd4df61de2edf6c934202d6c89776f2eb6e053843d92f521"} Dec 06 05:56:35 crc kubenswrapper[4706]: I1206 05:56:35.961303 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 05:56:35 crc kubenswrapper[4706]: I1206 05:56:35.962011 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 05:56:35 crc kubenswrapper[4706]: I1206 05:56:35.962087 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 05:56:35 crc kubenswrapper[4706]: I1206 05:56:35.963150 4706 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"802a2997475c34e3468ebd06845af6e76e5a777088e134200058afb534cc75ab"} pod="openshift-machine-config-operator/machine-config-daemon-z27rn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 06 05:56:35 crc kubenswrapper[4706]: I1206 05:56:35.963221 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" containerID="cri-o://802a2997475c34e3468ebd06845af6e76e5a777088e134200058afb534cc75ab" gracePeriod=600 Dec 06 05:56:36 crc kubenswrapper[4706]: I1206 05:56:36.541495 4706 generic.go:334] "Generic (PLEG): container finished" podID="ae6d3c62-ad40-492b-9c35-d0043649cb81" 
containerID="802a2997475c34e3468ebd06845af6e76e5a777088e134200058afb534cc75ab" exitCode=0 Dec 06 05:56:36 crc kubenswrapper[4706]: I1206 05:56:36.541538 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerDied","Data":"802a2997475c34e3468ebd06845af6e76e5a777088e134200058afb534cc75ab"} Dec 06 05:56:36 crc kubenswrapper[4706]: I1206 05:56:36.541594 4706 scope.go:117] "RemoveContainer" containerID="6ba3f967cae8dd60be6d55f57fee4d58b3e422c61831279a39b122727eb4359a" Dec 06 05:56:40 crc kubenswrapper[4706]: I1206 05:56:40.588918 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb"} Dec 06 05:56:43 crc kubenswrapper[4706]: I1206 05:56:43.046712 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-7xkwl"] Dec 06 05:56:43 crc kubenswrapper[4706]: I1206 05:56:43.058664 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-7xkwl"] Dec 06 05:56:44 crc kubenswrapper[4706]: I1206 05:56:44.046885 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="965980f7-e73f-450a-b431-61e071a0361f" path="/var/lib/kubelet/pods/965980f7-e73f-450a-b431-61e071a0361f/volumes" Dec 06 05:56:45 crc kubenswrapper[4706]: I1206 05:56:45.714248 4706 generic.go:334] "Generic (PLEG): container finished" podID="4bf72222-ee0b-41a2-877e-1bd5c83b392a" containerID="f72f6be0e3a32426368ecd84a9dff6f9804861b7e63662d69eac9698b527e790" exitCode=0 Dec 06 05:56:45 crc kubenswrapper[4706]: I1206 05:56:45.714370 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5nx52" event={"ID":"4bf72222-ee0b-41a2-877e-1bd5c83b392a","Type":"ContainerDied","Data":"f72f6be0e3a32426368ecd84a9dff6f9804861b7e63662d69eac9698b527e790"} Dec 06 05:56:47 crc kubenswrapper[4706]: I1206 05:56:47.177459 4706 scope.go:117] "RemoveContainer" containerID="bca96de5277a5c6bc2de53305b2e4cdf26ffdbbc140afb17297974ae84b20815" Dec 06 05:56:47 crc kubenswrapper[4706]: I1206 05:56:47.215631 4706 scope.go:117] "RemoveContainer" containerID="0401c42c4a0f951f015e24de34691af48b08abe72636f9e57c5b99d401658074" Dec 06 05:56:47 crc kubenswrapper[4706]: I1206 05:56:47.245771 4706 scope.go:117] "RemoveContainer" containerID="1f3a99267e795dff0557aaf03d838400653c1ee98d08e931b61dea17094f4259" Dec 06 05:56:47 crc kubenswrapper[4706]: I1206 05:56:47.734720 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5nx52" event={"ID":"4bf72222-ee0b-41a2-877e-1bd5c83b392a","Type":"ContainerStarted","Data":"01b6f8d4b507c9092171db15d6c2b9cc17cfc98fd42dfc9bc39e051710426111"} Dec 06 05:56:47 crc kubenswrapper[4706]: I1206 05:56:47.765530 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5nx52" podStartSLOduration=3.944777726 podStartE2EDuration="16.765511646s" podCreationTimestamp="2025-12-06 05:56:31 +0000 UTC" firstStartedPulling="2025-12-06 05:56:33.514064408 +0000 UTC m=+2215.841888352" lastFinishedPulling="2025-12-06 05:56:46.334798328 +0000 UTC m=+2228.662622272" observedRunningTime="2025-12-06 05:56:47.759358739 +0000 UTC m=+2230.087182683" watchObservedRunningTime="2025-12-06 
Dec 06 05:56:43 crc kubenswrapper[4706]: I1206 05:56:43.046712 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-7xkwl"]
Dec 06 05:56:43 crc kubenswrapper[4706]: I1206 05:56:43.058664 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-7xkwl"]
Dec 06 05:56:44 crc kubenswrapper[4706]: I1206 05:56:44.046885 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="965980f7-e73f-450a-b431-61e071a0361f" path="/var/lib/kubelet/pods/965980f7-e73f-450a-b431-61e071a0361f/volumes"
Dec 06 05:56:45 crc kubenswrapper[4706]: I1206 05:56:45.714248 4706 generic.go:334] "Generic (PLEG): container finished" podID="4bf72222-ee0b-41a2-877e-1bd5c83b392a" containerID="f72f6be0e3a32426368ecd84a9dff6f9804861b7e63662d69eac9698b527e790" exitCode=0
Dec 06 05:56:45 crc kubenswrapper[4706]: I1206 05:56:45.714370 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5nx52" event={"ID":"4bf72222-ee0b-41a2-877e-1bd5c83b392a","Type":"ContainerDied","Data":"f72f6be0e3a32426368ecd84a9dff6f9804861b7e63662d69eac9698b527e790"}
Dec 06 05:56:47 crc kubenswrapper[4706]: I1206 05:56:47.177459 4706 scope.go:117] "RemoveContainer" containerID="bca96de5277a5c6bc2de53305b2e4cdf26ffdbbc140afb17297974ae84b20815"
Dec 06 05:56:47 crc kubenswrapper[4706]: I1206 05:56:47.215631 4706 scope.go:117] "RemoveContainer" containerID="0401c42c4a0f951f015e24de34691af48b08abe72636f9e57c5b99d401658074"
Dec 06 05:56:47 crc kubenswrapper[4706]: I1206 05:56:47.245771 4706 scope.go:117] "RemoveContainer" containerID="1f3a99267e795dff0557aaf03d838400653c1ee98d08e931b61dea17094f4259"
Dec 06 05:56:47 crc kubenswrapper[4706]: I1206 05:56:47.734720 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5nx52" event={"ID":"4bf72222-ee0b-41a2-877e-1bd5c83b392a","Type":"ContainerStarted","Data":"01b6f8d4b507c9092171db15d6c2b9cc17cfc98fd42dfc9bc39e051710426111"}
Dec 06 05:56:47 crc kubenswrapper[4706]: I1206 05:56:47.765530 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5nx52" podStartSLOduration=3.944777726 podStartE2EDuration="16.765511646s" podCreationTimestamp="2025-12-06 05:56:31 +0000 UTC" firstStartedPulling="2025-12-06 05:56:33.514064408 +0000 UTC m=+2215.841888352" lastFinishedPulling="2025-12-06 05:56:46.334798328 +0000 UTC m=+2228.662622272" observedRunningTime="2025-12-06 05:56:47.759358739 +0000 UTC m=+2230.087182683" watchObservedRunningTime="2025-12-06 05:56:47.765511646 +0000 UTC m=+2230.093335590"
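The pod_startup_latency_tracker entry above can be checked by hand: the m=+ values are seconds since kubelet start, podStartE2EDuration is creation-to-running, and podStartSLOduration works out to the E2E duration minus the image-pull window (firstStartedPulling to lastFinishedPulling). That subtraction rule is inferred from the numbers in this entry rather than taken from kubelet source; a small sketch using the log's own values:

# Reading aid for the startup-latency entry above. Differences between m=+
# offsets are exact elapsed seconds; the SLO = E2E - pull-window relation is
# inferred from the logged numbers, not quoted from kubelet code.
first_started_pulling = 2215.841888352   # m=+ offset of firstStartedPulling
last_finished_pulling = 2228.662622272   # m=+ offset of lastFinishedPulling
pod_start_e2e = 16.765511646             # podStartE2EDuration, in seconds

pull_window = last_finished_pulling - first_started_pulling  # 12.820733920 s
slo_duration = pod_start_e2e - pull_window

print(f"image pull window:   {pull_window:.9f} s")
print(f"podStartSLOduration: {slo_duration:.9f} s")  # 3.944777726, matches the log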
Dec 06 05:56:51 crc kubenswrapper[4706]: I1206 05:56:51.542527 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5nx52"
Dec 06 05:56:51 crc kubenswrapper[4706]: I1206 05:56:51.543004 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5nx52"
Dec 06 05:56:51 crc kubenswrapper[4706]: I1206 05:56:51.589659 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5nx52"
Dec 06 05:56:51 crc kubenswrapper[4706]: I1206 05:56:51.827293 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5nx52"
Dec 06 05:56:52 crc kubenswrapper[4706]: I1206 05:56:52.285295 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5nx52"]
Dec 06 05:56:52 crc kubenswrapper[4706]: I1206 05:56:52.665028 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xn9dq"]
Dec 06 05:56:52 crc kubenswrapper[4706]: I1206 05:56:52.665650 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xn9dq" podUID="9d06d7a2-470f-433c-870b-c78293eeb02b" containerName="registry-server" containerID="cri-o://5d0ed54994801eb3f1d781283344c0e621fa0985d244c0ede079797d77175d7f" gracePeriod=2
Dec 06 05:56:56 crc kubenswrapper[4706]: I1206 05:56:56.049672 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-bs4sf"]
Dec 06 05:56:56 crc kubenswrapper[4706]: I1206 05:56:56.050736 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-bs4sf"]
Dec 06 05:56:58 crc kubenswrapper[4706]: I1206 05:56:58.049207 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a7ff6dd-4101-4650-a9b5-af050055f631" path="/var/lib/kubelet/pods/2a7ff6dd-4101-4650-a9b5-af050055f631/volumes"
Dec 06 05:56:58 crc kubenswrapper[4706]: I1206 05:56:58.935593 4706 generic.go:334] "Generic (PLEG): container finished" podID="9d06d7a2-470f-433c-870b-c78293eeb02b" containerID="5d0ed54994801eb3f1d781283344c0e621fa0985d244c0ede079797d77175d7f" exitCode=0
Dec 06 05:56:58 crc kubenswrapper[4706]: I1206 05:56:58.935674 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xn9dq" event={"ID":"9d06d7a2-470f-433c-870b-c78293eeb02b","Type":"ContainerDied","Data":"5d0ed54994801eb3f1d781283344c0e621fa0985d244c0ede079797d77175d7f"}
Dec 06 05:56:59 crc kubenswrapper[4706]: I1206 05:56:59.505699 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xn9dq"
Dec 06 05:56:59 crc kubenswrapper[4706]: I1206 05:56:59.648612 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d06d7a2-470f-433c-870b-c78293eeb02b-catalog-content\") pod \"9d06d7a2-470f-433c-870b-c78293eeb02b\" (UID: \"9d06d7a2-470f-433c-870b-c78293eeb02b\") "
Dec 06 05:56:59 crc kubenswrapper[4706]: I1206 05:56:59.648859 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-msw6b\" (UniqueName: \"kubernetes.io/projected/9d06d7a2-470f-433c-870b-c78293eeb02b-kube-api-access-msw6b\") pod \"9d06d7a2-470f-433c-870b-c78293eeb02b\" (UID: \"9d06d7a2-470f-433c-870b-c78293eeb02b\") "
Dec 06 05:56:59 crc kubenswrapper[4706]: I1206 05:56:59.648897 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d06d7a2-470f-433c-870b-c78293eeb02b-utilities\") pod \"9d06d7a2-470f-433c-870b-c78293eeb02b\" (UID: \"9d06d7a2-470f-433c-870b-c78293eeb02b\") "
Dec 06 05:56:59 crc kubenswrapper[4706]: I1206 05:56:59.649561 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d06d7a2-470f-433c-870b-c78293eeb02b-utilities" (OuterVolumeSpecName: "utilities") pod "9d06d7a2-470f-433c-870b-c78293eeb02b" (UID: "9d06d7a2-470f-433c-870b-c78293eeb02b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 06 05:56:59 crc kubenswrapper[4706]: I1206 05:56:59.653896 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d06d7a2-470f-433c-870b-c78293eeb02b-kube-api-access-msw6b" (OuterVolumeSpecName: "kube-api-access-msw6b") pod "9d06d7a2-470f-433c-870b-c78293eeb02b" (UID: "9d06d7a2-470f-433c-870b-c78293eeb02b"). InnerVolumeSpecName "kube-api-access-msw6b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 06 05:56:59 crc kubenswrapper[4706]: I1206 05:56:59.751327 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-msw6b\" (UniqueName: \"kubernetes.io/projected/9d06d7a2-470f-433c-870b-c78293eeb02b-kube-api-access-msw6b\") on node \"crc\" DevicePath \"\""
Dec 06 05:56:59 crc kubenswrapper[4706]: I1206 05:56:59.751366 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9d06d7a2-470f-433c-870b-c78293eeb02b-utilities\") on node \"crc\" DevicePath \"\""
Dec 06 05:56:59 crc kubenswrapper[4706]: I1206 05:56:59.946986 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xn9dq" event={"ID":"9d06d7a2-470f-433c-870b-c78293eeb02b","Type":"ContainerDied","Data":"a94d8b135a310d805ad745e17345e5f787bcc2aea12471d1cf986abba4316a58"}
Dec 06 05:56:59 crc kubenswrapper[4706]: I1206 05:56:59.947037 4706 scope.go:117] "RemoveContainer" containerID="5d0ed54994801eb3f1d781283344c0e621fa0985d244c0ede079797d77175d7f"
Dec 06 05:56:59 crc kubenswrapper[4706]: I1206 05:56:59.947102 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xn9dq"
Dec 06 05:57:01 crc kubenswrapper[4706]: I1206 05:57:01.969388 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d06d7a2-470f-433c-870b-c78293eeb02b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9d06d7a2-470f-433c-870b-c78293eeb02b" (UID: "9d06d7a2-470f-433c-870b-c78293eeb02b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 06 05:57:01 crc kubenswrapper[4706]: I1206 05:57:01.998316 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9d06d7a2-470f-433c-870b-c78293eeb02b-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 06 05:57:02 crc kubenswrapper[4706]: I1206 05:57:02.092392 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xn9dq"]
Dec 06 05:57:02 crc kubenswrapper[4706]: I1206 05:57:02.101070 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xn9dq"]
Dec 06 05:57:02 crc kubenswrapper[4706]: I1206 05:57:02.290390 4706 scope.go:117] "RemoveContainer" containerID="e56779f538657cb0da003ac85956bff6d900551b32e6483bb8d3f6dcbb2d2ee3"
Dec 06 05:57:02 crc kubenswrapper[4706]: I1206 05:57:02.473728 4706 scope.go:117] "RemoveContainer" containerID="a5bd9ea1d1199644dbbba32baf6345fb765d6ddb866a19a6f22bb3329b062e0f"
Dec 06 05:57:04 crc kubenswrapper[4706]: I1206 05:57:04.049332 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d06d7a2-470f-433c-870b-c78293eeb02b" path="/var/lib/kubelet/pods/9d06d7a2-470f-433c-870b-c78293eeb02b/volumes"
Dec 06 05:57:11 crc kubenswrapper[4706]: I1206 05:57:11.039293 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-9vnhs"]
Dec 06 05:57:11 crc kubenswrapper[4706]: I1206 05:57:11.059957 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-mzfvf"]
Dec 06 05:57:11 crc kubenswrapper[4706]: I1206 05:57:11.078418 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-9vnhs"]
Dec 06 05:57:11 crc kubenswrapper[4706]: I1206 05:57:11.089327 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-mzfvf"]
Dec 06 05:57:12 crc kubenswrapper[4706]: I1206 05:57:12.048524 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03d1d05b-3978-41bd-a7b6-5c0465432409" path="/var/lib/kubelet/pods/03d1d05b-3978-41bd-a7b6-5c0465432409/volumes"
Dec 06 05:57:12 crc kubenswrapper[4706]: I1206 05:57:12.049656 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f0ced3b-4b02-4ce1-935a-af7cc2e01346" path="/var/lib/kubelet/pods/4f0ced3b-4b02-4ce1-935a-af7cc2e01346/volumes"
Dec 06 05:57:22 crc kubenswrapper[4706]: I1206 05:57:22.052862 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-csbkx"]
Dec 06 05:57:22 crc kubenswrapper[4706]: I1206 05:57:22.067657 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-csbkx"]
Dec 06 05:57:24 crc kubenswrapper[4706]: I1206 05:57:24.045848 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bec3465-219b-4c57-83a9-aed4c78d1483" path="/var/lib/kubelet/pods/7bec3465-219b-4c57-83a9-aed4c78d1483/volumes"
Dec 06 05:57:29 crc kubenswrapper[4706]: I1206 05:57:29.047584 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-pjt6m"]
Dec 06 05:57:29 crc kubenswrapper[4706]: I1206 05:57:29.064520 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-pjt6m"]
Dec 06 05:57:30 crc kubenswrapper[4706]: I1206 05:57:30.048614 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc067583-4394-4fa3-86fc-d6e626ec0f18" path="/var/lib/kubelet/pods/bc067583-4394-4fa3-86fc-d6e626ec0f18/volumes"
Dec 06 05:57:37 crc kubenswrapper[4706]: I1206 05:57:37.127997 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-tkr88"]
Dec 06 05:57:37 crc kubenswrapper[4706]: E1206 05:57:37.129160 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d06d7a2-470f-433c-870b-c78293eeb02b" containerName="extract-content"
Dec 06 05:57:37 crc kubenswrapper[4706]: I1206 05:57:37.129176 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d06d7a2-470f-433c-870b-c78293eeb02b" containerName="extract-content"
Dec 06 05:57:37 crc kubenswrapper[4706]: E1206 05:57:37.129193 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d06d7a2-470f-433c-870b-c78293eeb02b" containerName="registry-server"
Dec 06 05:57:37 crc kubenswrapper[4706]: I1206 05:57:37.129200 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d06d7a2-470f-433c-870b-c78293eeb02b" containerName="registry-server"
Dec 06 05:57:37 crc kubenswrapper[4706]: E1206 05:57:37.129226 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d06d7a2-470f-433c-870b-c78293eeb02b" containerName="extract-utilities"
Dec 06 05:57:37 crc kubenswrapper[4706]: I1206 05:57:37.129234 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d06d7a2-470f-433c-870b-c78293eeb02b" containerName="extract-utilities"
Dec 06 05:57:37 crc kubenswrapper[4706]: I1206 05:57:37.129410 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d06d7a2-470f-433c-870b-c78293eeb02b" containerName="registry-server"
Dec 06 05:57:37 crc kubenswrapper[4706]: I1206 05:57:37.130798 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tkr88"
Dec 06 05:57:37 crc kubenswrapper[4706]: I1206 05:57:37.143593 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b9b2465-7610-443f-9c1d-6633e0da786e-utilities\") pod \"redhat-operators-tkr88\" (UID: \"9b9b2465-7610-443f-9c1d-6633e0da786e\") " pod="openshift-marketplace/redhat-operators-tkr88"
Dec 06 05:57:37 crc kubenswrapper[4706]: I1206 05:57:37.143684 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8cnd\" (UniqueName: \"kubernetes.io/projected/9b9b2465-7610-443f-9c1d-6633e0da786e-kube-api-access-t8cnd\") pod \"redhat-operators-tkr88\" (UID: \"9b9b2465-7610-443f-9c1d-6633e0da786e\") " pod="openshift-marketplace/redhat-operators-tkr88"
Dec 06 05:57:37 crc kubenswrapper[4706]: I1206 05:57:37.143755 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b9b2465-7610-443f-9c1d-6633e0da786e-catalog-content\") pod \"redhat-operators-tkr88\" (UID: \"9b9b2465-7610-443f-9c1d-6633e0da786e\") " pod="openshift-marketplace/redhat-operators-tkr88"
Dec 06 05:57:37 crc kubenswrapper[4706]: I1206 05:57:37.159725 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tkr88"]
Dec 06 05:57:37 crc kubenswrapper[4706]: I1206 05:57:37.245310 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b9b2465-7610-443f-9c1d-6633e0da786e-utilities\") pod \"redhat-operators-tkr88\" (UID: \"9b9b2465-7610-443f-9c1d-6633e0da786e\") " pod="openshift-marketplace/redhat-operators-tkr88"
Dec 06 05:57:37 crc kubenswrapper[4706]: I1206 05:57:37.245382 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8cnd\" (UniqueName: \"kubernetes.io/projected/9b9b2465-7610-443f-9c1d-6633e0da786e-kube-api-access-t8cnd\") pod \"redhat-operators-tkr88\" (UID: \"9b9b2465-7610-443f-9c1d-6633e0da786e\") " pod="openshift-marketplace/redhat-operators-tkr88"
Dec 06 05:57:37 crc kubenswrapper[4706]: I1206 05:57:37.245438 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b9b2465-7610-443f-9c1d-6633e0da786e-catalog-content\") pod \"redhat-operators-tkr88\" (UID: \"9b9b2465-7610-443f-9c1d-6633e0da786e\") " pod="openshift-marketplace/redhat-operators-tkr88"
Dec 06 05:57:37 crc kubenswrapper[4706]: I1206 05:57:37.245949 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b9b2465-7610-443f-9c1d-6633e0da786e-catalog-content\") pod \"redhat-operators-tkr88\" (UID: \"9b9b2465-7610-443f-9c1d-6633e0da786e\") " pod="openshift-marketplace/redhat-operators-tkr88"
Dec 06 05:57:37 crc kubenswrapper[4706]: I1206 05:57:37.246275 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b9b2465-7610-443f-9c1d-6633e0da786e-utilities\") pod \"redhat-operators-tkr88\" (UID: \"9b9b2465-7610-443f-9c1d-6633e0da786e\") " pod="openshift-marketplace/redhat-operators-tkr88"
Dec 06 05:57:37 crc kubenswrapper[4706]: I1206 05:57:37.267450 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8cnd\" (UniqueName: \"kubernetes.io/projected/9b9b2465-7610-443f-9c1d-6633e0da786e-kube-api-access-t8cnd\") pod \"redhat-operators-tkr88\" (UID: \"9b9b2465-7610-443f-9c1d-6633e0da786e\") " pod="openshift-marketplace/redhat-operators-tkr88"
Dec 06 05:57:37 crc kubenswrapper[4706]: I1206 05:57:37.462986 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tkr88"
Dec 06 05:57:37 crc kubenswrapper[4706]: W1206 05:57:37.970448 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9b9b2465_7610_443f_9c1d_6633e0da786e.slice/crio-8055f2ef01c2dc9a4cb7323543bfcd4187c6d23d2e0f013ca4d120c26be96306 WatchSource:0}: Error finding container 8055f2ef01c2dc9a4cb7323543bfcd4187c6d23d2e0f013ca4d120c26be96306: Status 404 returned error can't find the container with id 8055f2ef01c2dc9a4cb7323543bfcd4187c6d23d2e0f013ca4d120c26be96306
Dec 06 05:57:37 crc kubenswrapper[4706]: I1206 05:57:37.982685 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-tkr88"]
Dec 06 05:57:38 crc kubenswrapper[4706]: I1206 05:57:38.298632 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tkr88" event={"ID":"9b9b2465-7610-443f-9c1d-6633e0da786e","Type":"ContainerStarted","Data":"8055f2ef01c2dc9a4cb7323543bfcd4187c6d23d2e0f013ca4d120c26be96306"}
Dec 06 05:57:39 crc kubenswrapper[4706]: I1206 05:57:39.310335 4706 generic.go:334] "Generic (PLEG): container finished" podID="9b9b2465-7610-443f-9c1d-6633e0da786e" containerID="fd10b7f8b7bf30f43fa1e5a57d8fe9ff122a3e3cd6758180e4a953e4ee91f37f" exitCode=0
Dec 06 05:57:39 crc kubenswrapper[4706]: I1206 05:57:39.310399 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tkr88" event={"ID":"9b9b2465-7610-443f-9c1d-6633e0da786e","Type":"ContainerDied","Data":"fd10b7f8b7bf30f43fa1e5a57d8fe9ff122a3e3cd6758180e4a953e4ee91f37f"}
Dec 06 05:57:40 crc kubenswrapper[4706]: I1206 05:57:40.320588 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tkr88" event={"ID":"9b9b2465-7610-443f-9c1d-6633e0da786e","Type":"ContainerStarted","Data":"3031614c8cad9d25712556c28494198fb715088cb341ae4df5d000a2fecd4cef"}
Dec 06 05:57:41 crc kubenswrapper[4706]: I1206 05:57:41.329743 4706 generic.go:334] "Generic (PLEG): container finished" podID="9b9b2465-7610-443f-9c1d-6633e0da786e" containerID="3031614c8cad9d25712556c28494198fb715088cb341ae4df5d000a2fecd4cef" exitCode=0
Dec 06 05:57:41 crc kubenswrapper[4706]: I1206 05:57:41.329829 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tkr88" event={"ID":"9b9b2465-7610-443f-9c1d-6633e0da786e","Type":"ContainerDied","Data":"3031614c8cad9d25712556c28494198fb715088cb341ae4df5d000a2fecd4cef"}
Dec 06 05:57:42 crc kubenswrapper[4706]: I1206 05:57:42.341834 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tkr88" event={"ID":"9b9b2465-7610-443f-9c1d-6633e0da786e","Type":"ContainerStarted","Data":"83487f68b1591c6eeb9850f8d613dde73f2705c5723998b36321a4dbdec1fbc9"}
Dec 06 05:57:42 crc kubenswrapper[4706]: I1206 05:57:42.367587 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-tkr88" podStartSLOduration=2.8835511929999997 podStartE2EDuration="5.36756802s" podCreationTimestamp="2025-12-06 05:57:37 +0000 UTC" firstStartedPulling="2025-12-06 05:57:39.312142818 +0000 UTC m=+2281.639966762" lastFinishedPulling="2025-12-06 05:57:41.796159645 +0000 UTC m=+2284.123983589" observedRunningTime="2025-12-06 05:57:42.363601223 +0000 UTC m=+2284.691425167" watchObservedRunningTime="2025-12-06 05:57:42.36756802 +0000 UTC m=+2284.695391964"
Dec 06 05:57:47 crc kubenswrapper[4706]: I1206 05:57:47.032356 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-mpcv6"]
Dec 06 05:57:47 crc kubenswrapper[4706]: I1206 05:57:47.038886 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-mpcv6"]
Dec 06 05:57:47 crc kubenswrapper[4706]: I1206 05:57:47.329425 4706 scope.go:117] "RemoveContainer" containerID="607fcf298a4deeae981f6df41e2639129c5e0b5fa0542c0178fbfeb81fd05716"
Dec 06 05:57:47 crc kubenswrapper[4706]: I1206 05:57:47.464072 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-tkr88"
Dec 06 05:57:47 crc kubenswrapper[4706]: I1206 05:57:47.464133 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-tkr88"
Dec 06 05:57:47 crc kubenswrapper[4706]: I1206 05:57:47.517286 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-tkr88"
Dec 06 05:57:47 crc kubenswrapper[4706]: I1206 05:57:47.660948 4706 scope.go:117] "RemoveContainer" containerID="f4c3a5a604c3d6b739b1cdb3098d206b09058eebb8b42cf5898c707a28617b45"
Dec 06 05:57:47 crc kubenswrapper[4706]: I1206 05:57:47.698631 4706 scope.go:117] "RemoveContainer" containerID="8ab97a3e9911e22c9eec4678e401db9aeeef45fe97e735d2361d29ec47239633"
Dec 06 05:57:47 crc kubenswrapper[4706]: I1206 05:57:47.749741 4706 scope.go:117] "RemoveContainer" containerID="0956ef6adc5ea811ae355357ff7be56603cafeab53c5ac9b353d08f2502893a2"
Dec 06 05:57:47 crc kubenswrapper[4706]: I1206 05:57:47.783969 4706 scope.go:117] "RemoveContainer" containerID="8ac0b108392f0f9075ab778bb7034eb663c81084180d87d2ff18e5f093aadb51"
Dec 06 05:57:48 crc kubenswrapper[4706]: I1206 05:57:48.028035 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-e1a8-account-create-update-cms92"]
Dec 06 05:57:48 crc kubenswrapper[4706]: I1206 05:57:48.047373 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7867b3a-9ee2-4c8e-a401-7181a5c4a9da" path="/var/lib/kubelet/pods/f7867b3a-9ee2-4c8e-a401-7181a5c4a9da/volumes"
Dec 06 05:57:48 crc kubenswrapper[4706]: I1206 05:57:48.047853 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-e1a8-account-create-update-cms92"]
Dec 06 05:57:48 crc kubenswrapper[4706]: I1206 05:57:48.434910 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-tkr88"
Dec 06 05:57:48 crc kubenswrapper[4706]: I1206 05:57:48.477377 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tkr88"]
Dec 06 05:57:49 crc kubenswrapper[4706]: I1206 05:57:49.028514 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-46nqn"]
Dec 06 05:57:49 crc kubenswrapper[4706]: I1206 05:57:49.044541 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-b649-account-create-update-htrrc"]
Dec 06 05:57:49 crc kubenswrapper[4706]: I1206 05:57:49.066911 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-8xcns"]
Dec 06 05:57:49 crc kubenswrapper[4706]: I1206 05:57:49.076213 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-c61d-account-create-update-n5zhs"]
Dec 06 05:57:49 crc kubenswrapper[4706]: I1206 05:57:49.083826 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-46nqn"]
Dec 06 05:57:49 crc kubenswrapper[4706]: I1206 05:57:49.092103 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-8xcns"]
Dec 06 05:57:49 crc kubenswrapper[4706]: I1206 05:57:49.100570 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-b649-account-create-update-htrrc"]
Dec 06 05:57:49 crc kubenswrapper[4706]: I1206 05:57:49.108986 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-c61d-account-create-update-n5zhs"]
Dec 06 05:57:50 crc kubenswrapper[4706]: I1206 05:57:50.058203 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38fec6d0-a4dc-45b4-a7fb-7a185ce174e4" path="/var/lib/kubelet/pods/38fec6d0-a4dc-45b4-a7fb-7a185ce174e4/volumes"
Dec 06 05:57:50 crc kubenswrapper[4706]: I1206 05:57:50.059082 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="956e4870-4475-4d8e-a0c2-0ffefcfcbb1f" path="/var/lib/kubelet/pods/956e4870-4475-4d8e-a0c2-0ffefcfcbb1f/volumes"
Dec 06 05:57:50 crc kubenswrapper[4706]: I1206 05:57:50.059953 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aea1974a-8997-47fe-9c50-26387876a96a" path="/var/lib/kubelet/pods/aea1974a-8997-47fe-9c50-26387876a96a/volumes"
Dec 06 05:57:50 crc kubenswrapper[4706]: I1206 05:57:50.060790 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c07df634-8325-4942-b8a1-7764cd036d1f" path="/var/lib/kubelet/pods/c07df634-8325-4942-b8a1-7764cd036d1f/volumes"
Dec 06 05:57:50 crc kubenswrapper[4706]: I1206 05:57:50.062326 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f16bb998-03b9-4bd9-93d4-9965fd119d32" path="/var/lib/kubelet/pods/f16bb998-03b9-4bd9-93d4-9965fd119d32/volumes"
Dec 06 05:57:50 crc kubenswrapper[4706]: I1206 05:57:50.409571 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-tkr88" podUID="9b9b2465-7610-443f-9c1d-6633e0da786e" containerName="registry-server" containerID="cri-o://83487f68b1591c6eeb9850f8d613dde73f2705c5723998b36321a4dbdec1fbc9" gracePeriod=2
Dec 06 05:57:52 crc kubenswrapper[4706]: E1206 05:57:52.121459 4706 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9b9b2465_7610_443f_9c1d_6633e0da786e.slice/crio-conmon-83487f68b1591c6eeb9850f8d613dde73f2705c5723998b36321a4dbdec1fbc9.scope\": RecentStats: unable to find data in memory cache]"
Dec 06 05:57:52 crc kubenswrapper[4706]: I1206 05:57:52.432907 4706 generic.go:334] "Generic (PLEG): container finished" podID="9b9b2465-7610-443f-9c1d-6633e0da786e" containerID="83487f68b1591c6eeb9850f8d613dde73f2705c5723998b36321a4dbdec1fbc9" exitCode=0
Dec 06 05:57:52 crc kubenswrapper[4706]: I1206 05:57:52.432967 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tkr88" event={"ID":"9b9b2465-7610-443f-9c1d-6633e0da786e","Type":"ContainerDied","Data":"83487f68b1591c6eeb9850f8d613dde73f2705c5723998b36321a4dbdec1fbc9"}
Dec 06 05:57:52 crc kubenswrapper[4706]: I1206 05:57:52.934114 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tkr88"
Dec 06 05:57:53 crc kubenswrapper[4706]: I1206 05:57:53.011978 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b9b2465-7610-443f-9c1d-6633e0da786e-catalog-content\") pod \"9b9b2465-7610-443f-9c1d-6633e0da786e\" (UID: \"9b9b2465-7610-443f-9c1d-6633e0da786e\") "
Dec 06 05:57:53 crc kubenswrapper[4706]: I1206 05:57:53.012305 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t8cnd\" (UniqueName: \"kubernetes.io/projected/9b9b2465-7610-443f-9c1d-6633e0da786e-kube-api-access-t8cnd\") pod \"9b9b2465-7610-443f-9c1d-6633e0da786e\" (UID: \"9b9b2465-7610-443f-9c1d-6633e0da786e\") "
Dec 06 05:57:53 crc kubenswrapper[4706]: I1206 05:57:53.012452 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b9b2465-7610-443f-9c1d-6633e0da786e-utilities\") pod \"9b9b2465-7610-443f-9c1d-6633e0da786e\" (UID: \"9b9b2465-7610-443f-9c1d-6633e0da786e\") "
Dec 06 05:57:53 crc kubenswrapper[4706]: I1206 05:57:53.013192 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b9b2465-7610-443f-9c1d-6633e0da786e-utilities" (OuterVolumeSpecName: "utilities") pod "9b9b2465-7610-443f-9c1d-6633e0da786e" (UID: "9b9b2465-7610-443f-9c1d-6633e0da786e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 06 05:57:53 crc kubenswrapper[4706]: I1206 05:57:53.019166 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b9b2465-7610-443f-9c1d-6633e0da786e-kube-api-access-t8cnd" (OuterVolumeSpecName: "kube-api-access-t8cnd") pod "9b9b2465-7610-443f-9c1d-6633e0da786e" (UID: "9b9b2465-7610-443f-9c1d-6633e0da786e"). InnerVolumeSpecName "kube-api-access-t8cnd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 06 05:57:53 crc kubenswrapper[4706]: I1206 05:57:53.114827 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9b9b2465-7610-443f-9c1d-6633e0da786e-utilities\") on node \"crc\" DevicePath \"\""
Dec 06 05:57:53 crc kubenswrapper[4706]: I1206 05:57:53.114865 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t8cnd\" (UniqueName: \"kubernetes.io/projected/9b9b2465-7610-443f-9c1d-6633e0da786e-kube-api-access-t8cnd\") on node \"crc\" DevicePath \"\""
Dec 06 05:57:53 crc kubenswrapper[4706]: I1206 05:57:53.126911 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b9b2465-7610-443f-9c1d-6633e0da786e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9b9b2465-7610-443f-9c1d-6633e0da786e" (UID: "9b9b2465-7610-443f-9c1d-6633e0da786e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 06 05:57:53 crc kubenswrapper[4706]: I1206 05:57:53.216687 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9b9b2465-7610-443f-9c1d-6633e0da786e-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 06 05:57:53 crc kubenswrapper[4706]: I1206 05:57:53.443309 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-tkr88" event={"ID":"9b9b2465-7610-443f-9c1d-6633e0da786e","Type":"ContainerDied","Data":"8055f2ef01c2dc9a4cb7323543bfcd4187c6d23d2e0f013ca4d120c26be96306"}
Dec 06 05:57:53 crc kubenswrapper[4706]: I1206 05:57:53.443646 4706 scope.go:117] "RemoveContainer" containerID="83487f68b1591c6eeb9850f8d613dde73f2705c5723998b36321a4dbdec1fbc9"
Dec 06 05:57:53 crc kubenswrapper[4706]: I1206 05:57:53.443352 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-tkr88"
Dec 06 05:57:53 crc kubenswrapper[4706]: I1206 05:57:53.464484 4706 scope.go:117] "RemoveContainer" containerID="3031614c8cad9d25712556c28494198fb715088cb341ae4df5d000a2fecd4cef"
Dec 06 05:57:53 crc kubenswrapper[4706]: I1206 05:57:53.476302 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-tkr88"]
Dec 06 05:57:53 crc kubenswrapper[4706]: I1206 05:57:53.484455 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-tkr88"]
Dec 06 05:57:53 crc kubenswrapper[4706]: I1206 05:57:53.495960 4706 scope.go:117] "RemoveContainer" containerID="fd10b7f8b7bf30f43fa1e5a57d8fe9ff122a3e3cd6758180e4a953e4ee91f37f"
Dec 06 05:57:54 crc kubenswrapper[4706]: I1206 05:57:54.048174 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b9b2465-7610-443f-9c1d-6633e0da786e" path="/var/lib/kubelet/pods/9b9b2465-7610-443f-9c1d-6633e0da786e/volumes"
Dec 06 05:58:00 crc kubenswrapper[4706]: I1206 05:58:00.507829 4706 generic.go:334] "Generic (PLEG): container finished" podID="582f8518-3c87-496d-b057-b2f66658a731" containerID="f107c4c1a92968531c1a6256c9a1ab5a1de16e914d058be1d3196a9143d52393" exitCode=0
Dec 06 05:58:00 crc kubenswrapper[4706]: I1206 05:58:00.507932 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x" event={"ID":"582f8518-3c87-496d-b057-b2f66658a731","Type":"ContainerDied","Data":"f107c4c1a92968531c1a6256c9a1ab5a1de16e914d058be1d3196a9143d52393"}
Dec 06 05:58:01 crc kubenswrapper[4706]: I1206 05:58:01.938928 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x"
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.097301 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9c77s\" (UniqueName: \"kubernetes.io/projected/582f8518-3c87-496d-b057-b2f66658a731-kube-api-access-9c77s\") pod \"582f8518-3c87-496d-b057-b2f66658a731\" (UID: \"582f8518-3c87-496d-b057-b2f66658a731\") "
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.097413 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/582f8518-3c87-496d-b057-b2f66658a731-inventory\") pod \"582f8518-3c87-496d-b057-b2f66658a731\" (UID: \"582f8518-3c87-496d-b057-b2f66658a731\") "
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.097557 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/582f8518-3c87-496d-b057-b2f66658a731-ssh-key\") pod \"582f8518-3c87-496d-b057-b2f66658a731\" (UID: \"582f8518-3c87-496d-b057-b2f66658a731\") "
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.109818 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/582f8518-3c87-496d-b057-b2f66658a731-kube-api-access-9c77s" (OuterVolumeSpecName: "kube-api-access-9c77s") pod "582f8518-3c87-496d-b057-b2f66658a731" (UID: "582f8518-3c87-496d-b057-b2f66658a731"). InnerVolumeSpecName "kube-api-access-9c77s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.123449 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/582f8518-3c87-496d-b057-b2f66658a731-inventory" (OuterVolumeSpecName: "inventory") pod "582f8518-3c87-496d-b057-b2f66658a731" (UID: "582f8518-3c87-496d-b057-b2f66658a731"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.129260 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/582f8518-3c87-496d-b057-b2f66658a731-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "582f8518-3c87-496d-b057-b2f66658a731" (UID: "582f8518-3c87-496d-b057-b2f66658a731"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.199429 4706 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/582f8518-3c87-496d-b057-b2f66658a731-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.199462 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9c77s\" (UniqueName: \"kubernetes.io/projected/582f8518-3c87-496d-b057-b2f66658a731-kube-api-access-9c77s\") on node \"crc\" DevicePath \"\""
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.199471 4706 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/582f8518-3c87-496d-b057-b2f66658a731-inventory\") on node \"crc\" DevicePath \"\""
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.531356 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x" event={"ID":"582f8518-3c87-496d-b057-b2f66658a731","Type":"ContainerDied","Data":"62fca0a570e18bddda23d1a29bf62ef6d7c187ed613bedcbe485ec8f9022c38e"}
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.531786 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="62fca0a570e18bddda23d1a29bf62ef6d7c187ed613bedcbe485ec8f9022c38e"
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.531434 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x"
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.770798 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2"]
Dec 06 05:58:02 crc kubenswrapper[4706]: E1206 05:58:02.771610 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="582f8518-3c87-496d-b057-b2f66658a731" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.771627 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="582f8518-3c87-496d-b057-b2f66658a731" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Dec 06 05:58:02 crc kubenswrapper[4706]: E1206 05:58:02.771669 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b9b2465-7610-443f-9c1d-6633e0da786e" containerName="extract-utilities"
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.771677 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b9b2465-7610-443f-9c1d-6633e0da786e" containerName="extract-utilities"
Dec 06 05:58:02 crc kubenswrapper[4706]: E1206 05:58:02.771727 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b9b2465-7610-443f-9c1d-6633e0da786e" containerName="extract-content"
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.771741 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b9b2465-7610-443f-9c1d-6633e0da786e" containerName="extract-content"
Dec 06 05:58:02 crc kubenswrapper[4706]: E1206 05:58:02.771759 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b9b2465-7610-443f-9c1d-6633e0da786e" containerName="registry-server"
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.771766 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b9b2465-7610-443f-9c1d-6633e0da786e" containerName="registry-server"
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.772165 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b9b2465-7610-443f-9c1d-6633e0da786e" containerName="registry-server"
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.772187 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="582f8518-3c87-496d-b057-b2f66658a731" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.773124 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2"
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.786542 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9hwl"
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.786424 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.788073 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.789542 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.792447 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2"]
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.914346 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3dc977db-985f-4d5a-8735-0c417c7be72c-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2\" (UID: \"3dc977db-985f-4d5a-8735-0c417c7be72c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2"
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.914462 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t5z79\" (UniqueName: \"kubernetes.io/projected/3dc977db-985f-4d5a-8735-0c417c7be72c-kube-api-access-t5z79\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2\" (UID: \"3dc977db-985f-4d5a-8735-0c417c7be72c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2"
Dec 06 05:58:02 crc kubenswrapper[4706]: I1206 05:58:02.914558 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3dc977db-985f-4d5a-8735-0c417c7be72c-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2\" (UID: \"3dc977db-985f-4d5a-8735-0c417c7be72c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2"
Dec 06 05:58:03 crc kubenswrapper[4706]: I1206 05:58:03.016511 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3dc977db-985f-4d5a-8735-0c417c7be72c-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2\" (UID: \"3dc977db-985f-4d5a-8735-0c417c7be72c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2"
Dec 06 05:58:03 crc kubenswrapper[4706]: I1206 05:58:03.016605 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3dc977db-985f-4d5a-8735-0c417c7be72c-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2\" (UID: \"3dc977db-985f-4d5a-8735-0c417c7be72c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2"
Dec 06 05:58:03 crc kubenswrapper[4706]: I1206 05:58:03.016648 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t5z79\" (UniqueName: \"kubernetes.io/projected/3dc977db-985f-4d5a-8735-0c417c7be72c-kube-api-access-t5z79\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2\" (UID: \"3dc977db-985f-4d5a-8735-0c417c7be72c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2"
Dec 06 05:58:03 crc kubenswrapper[4706]: I1206 05:58:03.022039 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3dc977db-985f-4d5a-8735-0c417c7be72c-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2\" (UID: \"3dc977db-985f-4d5a-8735-0c417c7be72c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2"
Dec 06 05:58:03 crc kubenswrapper[4706]: I1206 05:58:03.022906 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3dc977db-985f-4d5a-8735-0c417c7be72c-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2\" (UID: \"3dc977db-985f-4d5a-8735-0c417c7be72c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2"
Dec 06 05:58:03 crc kubenswrapper[4706]: I1206 05:58:03.034977 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t5z79\" (UniqueName: \"kubernetes.io/projected/3dc977db-985f-4d5a-8735-0c417c7be72c-kube-api-access-t5z79\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2\" (UID: \"3dc977db-985f-4d5a-8735-0c417c7be72c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2"
Dec 06 05:58:03 crc kubenswrapper[4706]: I1206 05:58:03.091834 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2"
Dec 06 05:58:03 crc kubenswrapper[4706]: I1206 05:58:03.661366 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2"]
Dec 06 05:58:04 crc kubenswrapper[4706]: I1206 05:58:04.559808 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2" event={"ID":"3dc977db-985f-4d5a-8735-0c417c7be72c","Type":"ContainerStarted","Data":"495ea96077353e0c7c869d2ac7b124d9090f3c8a9acf053a940ce546bf556fb7"}
Dec 06 05:58:06 crc kubenswrapper[4706]: I1206 05:58:06.577004 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2" event={"ID":"3dc977db-985f-4d5a-8735-0c417c7be72c","Type":"ContainerStarted","Data":"f69b53c431b8f196a7fac7ed7202861d3281fe51a3b16c88bcaf63422f8dde58"}
Dec 06 05:58:06 crc kubenswrapper[4706]: I1206 05:58:06.607983 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2" podStartSLOduration=3.003357014 podStartE2EDuration="4.607960884s" podCreationTimestamp="2025-12-06 05:58:02 +0000 UTC" firstStartedPulling="2025-12-06 05:58:03.671457359 +0000 UTC m=+2305.999281303" lastFinishedPulling="2025-12-06 05:58:05.276061229 +0000 UTC m=+2307.603885173" observedRunningTime="2025-12-06 05:58:06.598159859 +0000 UTC m=+2308.925983823" watchObservedRunningTime="2025-12-06 05:58:06.607960884 +0000 UTC m=+2308.935784828"
Dec 06 05:58:21 crc kubenswrapper[4706]: I1206 05:58:21.043101 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-7gj9p"]
Dec 06 05:58:21 crc kubenswrapper[4706]: I1206 05:58:21.050115 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-7gj9p"]
Dec 06 05:58:22 crc kubenswrapper[4706]: I1206 05:58:22.047946 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ff0bb4e-18a4-493e-a666-e94aa8bacea5" path="/var/lib/kubelet/pods/1ff0bb4e-18a4-493e-a666-e94aa8bacea5/volumes"
Dec 06 05:58:47 crc kubenswrapper[4706]: I1206 05:58:47.943673 4706 scope.go:117] "RemoveContainer" containerID="be2d46dba7aeff67ac6593d4426b40cbcef335e71dab6335d7a1824b22b0c3e3"
Dec 06 05:58:47 crc kubenswrapper[4706]: I1206 05:58:47.994927 4706 scope.go:117] "RemoveContainer" containerID="9eb7527fd6a16530ec86a0b09cf651ea3e82aa77480121303f71b0dcc3752223"
Dec 06 05:58:48 crc kubenswrapper[4706]: I1206 05:58:48.023533 4706 scope.go:117] "RemoveContainer" containerID="4e5fa7c61a33dcba81caa08304cd477c9adc99b2f1ccc89c3a3eee8d9e72b64e"
Dec 06 05:58:48 crc kubenswrapper[4706]: I1206 05:58:48.068949 4706 scope.go:117] "RemoveContainer" containerID="d93d5832287bcfef20e6d3118eb35f0b99cd0ea998cac2818de586f05efa6e83"
Dec 06 05:58:48 crc kubenswrapper[4706]: I1206 05:58:48.128988 4706 scope.go:117] "RemoveContainer" containerID="124793da4ee8398b9eaa609f740ad8ed52568cb7727619a5b3c02ab729c97906"
Dec 06 05:58:48 crc kubenswrapper[4706]: I1206 05:58:48.164154 4706 scope.go:117] "RemoveContainer" containerID="06cfd71be6b79ca7c45f10048f870337836e1de93d78172a3304daa3b5743981"
Dec 06 05:58:48 crc kubenswrapper[4706]: I1206 05:58:48.206974 4706 scope.go:117] "RemoveContainer" containerID="071f148d3d13ca4f06eb329307ee9036ac0873e1ebccdfef816cc464ff4d7602"
Dec 06 05:59:05 crc kubenswrapper[4706]: I1206 05:59:05.961586 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 06 05:59:05 crc kubenswrapper[4706]: I1206 05:59:05.962157 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
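This is the same machine-config-daemon liveness failure already seen at 05:56:35, and it recurs at 05:59:35 below; the gaps are multiples of 30 seconds, consistent with a 30-second probe period (inferred from spacing only; the pod spec is not in this log). A sketch that recovers the cadence from the journal text, again assuming a one-entry-per-line kubelet.log:

# Sketch: recover the liveness-probe cadence for machine-config-daemon from
# this journal. The 30 s period is inferred from the spacing of failures,
# not read from the pod spec.
import re
from datetime import datetime

STAMP = re.compile(r'^(\w{3} \d{2} \d{2}:\d{2}:\d{2}) .* "Probe failed" .*machine-config-daemon')

failures = []
with open("kubelet.log") as fh:          # hypothetical one-entry-per-line dump
    for line in fh:
        m = STAMP.match(line)
        if m:
            # Journal stamps omit the year; borrow it from the log's dates.
            failures.append(datetime.strptime("2025 " + m.group(1), "%Y %b %d %H:%M:%S"))

for earlier, later in zip(failures, failures[1:]):
    print(later - earlier)   # gaps here: 0:02:30 and 0:00:30, both multiples of 30 s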
Dec 06 05:59:16 crc kubenswrapper[4706]: I1206 05:59:16.057355 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-dcfr8"]
Dec 06 05:59:16 crc kubenswrapper[4706]: I1206 05:59:16.057971 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-dcfr8"]
Dec 06 05:59:18 crc kubenswrapper[4706]: I1206 05:59:18.055960 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6e7005d-f57b-4d7c-a421-a259700fa0ad" path="/var/lib/kubelet/pods/b6e7005d-f57b-4d7c-a421-a259700fa0ad/volumes"
Dec 06 05:59:24 crc kubenswrapper[4706]: I1206 05:59:24.266105 4706 generic.go:334] "Generic (PLEG): container finished" podID="3dc977db-985f-4d5a-8735-0c417c7be72c" containerID="f69b53c431b8f196a7fac7ed7202861d3281fe51a3b16c88bcaf63422f8dde58" exitCode=0
Dec 06 05:59:24 crc kubenswrapper[4706]: I1206 05:59:24.266502 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2" event={"ID":"3dc977db-985f-4d5a-8735-0c417c7be72c","Type":"ContainerDied","Data":"f69b53c431b8f196a7fac7ed7202861d3281fe51a3b16c88bcaf63422f8dde58"}
Dec 06 05:59:25 crc kubenswrapper[4706]: I1206 05:59:25.652725 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2"
Dec 06 05:59:25 crc kubenswrapper[4706]: I1206 05:59:25.704300 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t5z79\" (UniqueName: \"kubernetes.io/projected/3dc977db-985f-4d5a-8735-0c417c7be72c-kube-api-access-t5z79\") pod \"3dc977db-985f-4d5a-8735-0c417c7be72c\" (UID: \"3dc977db-985f-4d5a-8735-0c417c7be72c\") "
Dec 06 05:59:25 crc kubenswrapper[4706]: I1206 05:59:25.704430 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3dc977db-985f-4d5a-8735-0c417c7be72c-inventory\") pod \"3dc977db-985f-4d5a-8735-0c417c7be72c\" (UID: \"3dc977db-985f-4d5a-8735-0c417c7be72c\") "
Dec 06 05:59:25 crc kubenswrapper[4706]: I1206 05:59:25.704550 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3dc977db-985f-4d5a-8735-0c417c7be72c-ssh-key\") pod \"3dc977db-985f-4d5a-8735-0c417c7be72c\" (UID: \"3dc977db-985f-4d5a-8735-0c417c7be72c\") "
Dec 06 05:59:25 crc kubenswrapper[4706]: I1206 05:59:25.710593 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3dc977db-985f-4d5a-8735-0c417c7be72c-kube-api-access-t5z79" (OuterVolumeSpecName: "kube-api-access-t5z79") pod "3dc977db-985f-4d5a-8735-0c417c7be72c" (UID: "3dc977db-985f-4d5a-8735-0c417c7be72c"). InnerVolumeSpecName "kube-api-access-t5z79". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 06 05:59:25 crc kubenswrapper[4706]: I1206 05:59:25.734184 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3dc977db-985f-4d5a-8735-0c417c7be72c-inventory" (OuterVolumeSpecName: "inventory") pod "3dc977db-985f-4d5a-8735-0c417c7be72c" (UID: "3dc977db-985f-4d5a-8735-0c417c7be72c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:59:25 crc kubenswrapper[4706]: I1206 05:59:25.743171 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3dc977db-985f-4d5a-8735-0c417c7be72c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3dc977db-985f-4d5a-8735-0c417c7be72c" (UID: "3dc977db-985f-4d5a-8735-0c417c7be72c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 05:59:25 crc kubenswrapper[4706]: I1206 05:59:25.808475 4706 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3dc977db-985f-4d5a-8735-0c417c7be72c-inventory\") on node \"crc\" DevicePath \"\""
Dec 06 05:59:25 crc kubenswrapper[4706]: I1206 05:59:25.808518 4706 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3dc977db-985f-4d5a-8735-0c417c7be72c-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 06 05:59:25 crc kubenswrapper[4706]: I1206 05:59:25.808531 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t5z79\" (UniqueName: \"kubernetes.io/projected/3dc977db-985f-4d5a-8735-0c417c7be72c-kube-api-access-t5z79\") on node \"crc\" DevicePath \"\""
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.285225 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2" event={"ID":"3dc977db-985f-4d5a-8735-0c417c7be72c","Type":"ContainerDied","Data":"495ea96077353e0c7c869d2ac7b124d9090f3c8a9acf053a940ce546bf556fb7"}
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.285265 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="495ea96077353e0c7c869d2ac7b124d9090f3c8a9acf053a940ce546bf556fb7"
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.285335 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2"
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.388584 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq"]
Dec 06 05:59:26 crc kubenswrapper[4706]: E1206 05:59:26.389286 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3dc977db-985f-4d5a-8735-0c417c7be72c" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.389407 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="3dc977db-985f-4d5a-8735-0c417c7be72c" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.389740 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="3dc977db-985f-4d5a-8735-0c417c7be72c" containerName="configure-network-edpm-deployment-openstack-edpm-ipam"
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.390836 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq"
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.401587 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9hwl"
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.401810 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.401974 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.402171 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq"]
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.402205 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.525020 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6c4f877c-27aa-40eb-b5ff-2968f748a978-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq\" (UID: \"6c4f877c-27aa-40eb-b5ff-2968f748a978\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq"
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.525144 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rd4cb\" (UniqueName: \"kubernetes.io/projected/6c4f877c-27aa-40eb-b5ff-2968f748a978-kube-api-access-rd4cb\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq\" (UID: \"6c4f877c-27aa-40eb-b5ff-2968f748a978\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq"
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.525192 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6c4f877c-27aa-40eb-b5ff-2968f748a978-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq\" (UID: \"6c4f877c-27aa-40eb-b5ff-2968f748a978\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq"
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.627262 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6c4f877c-27aa-40eb-b5ff-2968f748a978-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq\" (UID: \"6c4f877c-27aa-40eb-b5ff-2968f748a978\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq"
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.627363 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rd4cb\" (UniqueName: \"kubernetes.io/projected/6c4f877c-27aa-40eb-b5ff-2968f748a978-kube-api-access-rd4cb\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq\" (UID: \"6c4f877c-27aa-40eb-b5ff-2968f748a978\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq"
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.627407 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6c4f877c-27aa-40eb-b5ff-2968f748a978-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq\" (UID: \"6c4f877c-27aa-40eb-b5ff-2968f748a978\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq"
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.631471 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6c4f877c-27aa-40eb-b5ff-2968f748a978-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq\" (UID: \"6c4f877c-27aa-40eb-b5ff-2968f748a978\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq"
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.637654 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6c4f877c-27aa-40eb-b5ff-2968f748a978-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq\" (UID: \"6c4f877c-27aa-40eb-b5ff-2968f748a978\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq"
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.644927 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rd4cb\" (UniqueName: \"kubernetes.io/projected/6c4f877c-27aa-40eb-b5ff-2968f748a978-kube-api-access-rd4cb\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq\" (UID: \"6c4f877c-27aa-40eb-b5ff-2968f748a978\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq"
Dec 06 05:59:26 crc kubenswrapper[4706]: I1206 05:59:26.714130 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq"
Dec 06 05:59:27 crc kubenswrapper[4706]: I1206 05:59:27.253875 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq"]
Dec 06 05:59:27 crc kubenswrapper[4706]: I1206 05:59:27.263614 4706 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 06 05:59:27 crc kubenswrapper[4706]: I1206 05:59:27.294930 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq" event={"ID":"6c4f877c-27aa-40eb-b5ff-2968f748a978","Type":"ContainerStarted","Data":"6531f9c98118e36c887865504b60662cd6f79083bef82c75ec6be779c198f739"}
Dec 06 05:59:29 crc kubenswrapper[4706]: I1206 05:59:29.046305 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rxnl6"]
Dec 06 05:59:29 crc kubenswrapper[4706]: I1206 05:59:29.065030 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-rxnl6"]
Dec 06 05:59:30 crc kubenswrapper[4706]: I1206 05:59:30.051708 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff26c6d0-68cb-4541-b647-3a0b244db53c" path="/var/lib/kubelet/pods/ff26c6d0-68cb-4541-b647-3a0b244db53c/volumes"
Dec 06 05:59:31 crc kubenswrapper[4706]: I1206 05:59:31.330034 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq" event={"ID":"6c4f877c-27aa-40eb-b5ff-2968f748a978","Type":"ContainerStarted","Data":"8a627a70796f0eec9a7f50b01eca8bb89de2d89b0f191881531bc20da3133f8e"}
Dec 06 05:59:31 crc kubenswrapper[4706]: I1206 05:59:31.350542 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq" podStartSLOduration=2.216104574 podStartE2EDuration="5.350525033s" podCreationTimestamp="2025-12-06 05:59:26 +0000 UTC" firstStartedPulling="2025-12-06 05:59:27.26337368 +0000 UTC m=+2389.591197624" lastFinishedPulling="2025-12-06 05:59:30.397794139 +0000 UTC m=+2392.725618083" observedRunningTime="2025-12-06 05:59:31.345357664 +0000 UTC m=+2393.673181628" watchObservedRunningTime="2025-12-06 05:59:31.350525033 +0000 UTC m=+2393.678348977"
Dec 06 05:59:35 crc kubenswrapper[4706]: I1206 05:59:35.364281 4706 generic.go:334] "Generic (PLEG): container finished" podID="6c4f877c-27aa-40eb-b5ff-2968f748a978" containerID="8a627a70796f0eec9a7f50b01eca8bb89de2d89b0f191881531bc20da3133f8e" exitCode=0
Dec 06 05:59:35 crc kubenswrapper[4706]: I1206 05:59:35.364349 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq" event={"ID":"6c4f877c-27aa-40eb-b5ff-2968f748a978","Type":"ContainerDied","Data":"8a627a70796f0eec9a7f50b01eca8bb89de2d89b0f191881531bc20da3133f8e"}
Dec 06 05:59:35 crc kubenswrapper[4706]: I1206 05:59:35.961409 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 06 05:59:35 crc kubenswrapper[4706]: I1206 05:59:35.961472 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 06 05:59:36 crc kubenswrapper[4706]: I1206 05:59:36.750710 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq"
Dec 06 05:59:36 crc kubenswrapper[4706]: I1206 05:59:36.828452 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6c4f877c-27aa-40eb-b5ff-2968f748a978-ssh-key\") pod \"6c4f877c-27aa-40eb-b5ff-2968f748a978\" (UID: \"6c4f877c-27aa-40eb-b5ff-2968f748a978\") "
Dec 06 05:59:36 crc kubenswrapper[4706]: I1206 05:59:36.828566 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6c4f877c-27aa-40eb-b5ff-2968f748a978-inventory\") pod \"6c4f877c-27aa-40eb-b5ff-2968f748a978\" (UID: \"6c4f877c-27aa-40eb-b5ff-2968f748a978\") "
Dec 06 05:59:36 crc kubenswrapper[4706]: I1206 05:59:36.828632 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rd4cb\" (UniqueName: \"kubernetes.io/projected/6c4f877c-27aa-40eb-b5ff-2968f748a978-kube-api-access-rd4cb\") pod \"6c4f877c-27aa-40eb-b5ff-2968f748a978\" (UID: \"6c4f877c-27aa-40eb-b5ff-2968f748a978\") "
Dec 06 05:59:36 crc kubenswrapper[4706]: I1206 05:59:36.833448 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c4f877c-27aa-40eb-b5ff-2968f748a978-kube-api-access-rd4cb" (OuterVolumeSpecName: "kube-api-access-rd4cb") pod "6c4f877c-27aa-40eb-b5ff-2968f748a978" (UID: "6c4f877c-27aa-40eb-b5ff-2968f748a978"). InnerVolumeSpecName "kube-api-access-rd4cb".
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 05:59:36 crc kubenswrapper[4706]: I1206 05:59:36.857221 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c4f877c-27aa-40eb-b5ff-2968f748a978-inventory" (OuterVolumeSpecName: "inventory") pod "6c4f877c-27aa-40eb-b5ff-2968f748a978" (UID: "6c4f877c-27aa-40eb-b5ff-2968f748a978"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:59:36 crc kubenswrapper[4706]: I1206 05:59:36.857422 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c4f877c-27aa-40eb-b5ff-2968f748a978-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6c4f877c-27aa-40eb-b5ff-2968f748a978" (UID: "6c4f877c-27aa-40eb-b5ff-2968f748a978"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 05:59:36 crc kubenswrapper[4706]: I1206 05:59:36.930680 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rd4cb\" (UniqueName: \"kubernetes.io/projected/6c4f877c-27aa-40eb-b5ff-2968f748a978-kube-api-access-rd4cb\") on node \"crc\" DevicePath \"\"" Dec 06 05:59:36 crc kubenswrapper[4706]: I1206 05:59:36.930725 4706 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6c4f877c-27aa-40eb-b5ff-2968f748a978-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 06 05:59:36 crc kubenswrapper[4706]: I1206 05:59:36.930734 4706 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6c4f877c-27aa-40eb-b5ff-2968f748a978-inventory\") on node \"crc\" DevicePath \"\"" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.383400 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq" event={"ID":"6c4f877c-27aa-40eb-b5ff-2968f748a978","Type":"ContainerDied","Data":"6531f9c98118e36c887865504b60662cd6f79083bef82c75ec6be779c198f739"} Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.384160 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6531f9c98118e36c887865504b60662cd6f79083bef82c75ec6be779c198f739" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.383453 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.453865 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm"] Dec 06 05:59:37 crc kubenswrapper[4706]: E1206 05:59:37.454259 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c4f877c-27aa-40eb-b5ff-2968f748a978" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.454276 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c4f877c-27aa-40eb-b5ff-2968f748a978" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.454503 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c4f877c-27aa-40eb-b5ff-2968f748a978" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.455261 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.457400 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9hwl" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.459370 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.459605 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.459899 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.466933 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm"] Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.546942 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/82ebe200-9dff-4f3b-8bf1-e1a6feee951c-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-cpbpm\" (UID: \"82ebe200-9dff-4f3b-8bf1-e1a6feee951c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.547018 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82ebe200-9dff-4f3b-8bf1-e1a6feee951c-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-cpbpm\" (UID: \"82ebe200-9dff-4f3b-8bf1-e1a6feee951c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.547049 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nr62g\" (UniqueName: \"kubernetes.io/projected/82ebe200-9dff-4f3b-8bf1-e1a6feee951c-kube-api-access-nr62g\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-cpbpm\" (UID: \"82ebe200-9dff-4f3b-8bf1-e1a6feee951c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.648594 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/82ebe200-9dff-4f3b-8bf1-e1a6feee951c-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-cpbpm\" (UID: \"82ebe200-9dff-4f3b-8bf1-e1a6feee951c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.648669 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82ebe200-9dff-4f3b-8bf1-e1a6feee951c-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-cpbpm\" (UID: \"82ebe200-9dff-4f3b-8bf1-e1a6feee951c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.648690 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nr62g\" (UniqueName: \"kubernetes.io/projected/82ebe200-9dff-4f3b-8bf1-e1a6feee951c-kube-api-access-nr62g\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-cpbpm\" (UID: 
\"82ebe200-9dff-4f3b-8bf1-e1a6feee951c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.653580 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82ebe200-9dff-4f3b-8bf1-e1a6feee951c-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-cpbpm\" (UID: \"82ebe200-9dff-4f3b-8bf1-e1a6feee951c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.654767 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/82ebe200-9dff-4f3b-8bf1-e1a6feee951c-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-cpbpm\" (UID: \"82ebe200-9dff-4f3b-8bf1-e1a6feee951c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.664244 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nr62g\" (UniqueName: \"kubernetes.io/projected/82ebe200-9dff-4f3b-8bf1-e1a6feee951c-kube-api-access-nr62g\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-cpbpm\" (UID: \"82ebe200-9dff-4f3b-8bf1-e1a6feee951c\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm" Dec 06 05:59:37 crc kubenswrapper[4706]: I1206 05:59:37.775208 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm" Dec 06 05:59:38 crc kubenswrapper[4706]: I1206 05:59:38.289581 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm"] Dec 06 05:59:38 crc kubenswrapper[4706]: I1206 05:59:38.395020 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm" event={"ID":"82ebe200-9dff-4f3b-8bf1-e1a6feee951c","Type":"ContainerStarted","Data":"661200582ca3b3111144d96b1b83d6652034f361238a7c5aec42d70ebb9466af"} Dec 06 05:59:38 crc kubenswrapper[4706]: I1206 05:59:38.519703 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 06 05:59:39 crc kubenswrapper[4706]: I1206 05:59:39.405224 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm" event={"ID":"82ebe200-9dff-4f3b-8bf1-e1a6feee951c","Type":"ContainerStarted","Data":"d5d50b2acaf08475cf590b28f1a74d2fdbaa23f85ec839af5731d0bab4272918"} Dec 06 05:59:39 crc kubenswrapper[4706]: I1206 05:59:39.421506 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm" podStartSLOduration=2.20135105 podStartE2EDuration="2.421488945s" podCreationTimestamp="2025-12-06 05:59:37 +0000 UTC" firstStartedPulling="2025-12-06 05:59:38.29650634 +0000 UTC m=+2400.624330284" lastFinishedPulling="2025-12-06 05:59:38.516644225 +0000 UTC m=+2400.844468179" observedRunningTime="2025-12-06 05:59:39.420868799 +0000 UTC m=+2401.748692753" watchObservedRunningTime="2025-12-06 05:59:39.421488945 +0000 UTC m=+2401.749312889" Dec 06 05:59:48 crc kubenswrapper[4706]: I1206 05:59:48.356863 4706 scope.go:117] "RemoveContainer" containerID="653bcd3b827623823de59d10cf25ee9c8e417c5ea44bad8f8e3d0b03e626d53e" Dec 06 05:59:48 crc kubenswrapper[4706]: I1206 05:59:48.401350 4706 scope.go:117] 
"RemoveContainer" containerID="6821d12e2756ebfcf7ceff68b64c07924df203a6b5125fd4ed2e902ddec8a81d" Dec 06 06:00:00 crc kubenswrapper[4706]: I1206 06:00:00.147192 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv"] Dec 06 06:00:00 crc kubenswrapper[4706]: I1206 06:00:00.148984 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv" Dec 06 06:00:00 crc kubenswrapper[4706]: I1206 06:00:00.151927 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 06 06:00:00 crc kubenswrapper[4706]: I1206 06:00:00.152084 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 06 06:00:00 crc kubenswrapper[4706]: I1206 06:00:00.156292 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv"] Dec 06 06:00:00 crc kubenswrapper[4706]: I1206 06:00:00.269960 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c32a94c6-6869-41d4-bc7a-c2aa66ba68ad-secret-volume\") pod \"collect-profiles-29416680-r67cv\" (UID: \"c32a94c6-6869-41d4-bc7a-c2aa66ba68ad\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv" Dec 06 06:00:00 crc kubenswrapper[4706]: I1206 06:00:00.270013 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rskr\" (UniqueName: \"kubernetes.io/projected/c32a94c6-6869-41d4-bc7a-c2aa66ba68ad-kube-api-access-5rskr\") pod \"collect-profiles-29416680-r67cv\" (UID: \"c32a94c6-6869-41d4-bc7a-c2aa66ba68ad\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv" Dec 06 06:00:00 crc kubenswrapper[4706]: I1206 06:00:00.270193 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c32a94c6-6869-41d4-bc7a-c2aa66ba68ad-config-volume\") pod \"collect-profiles-29416680-r67cv\" (UID: \"c32a94c6-6869-41d4-bc7a-c2aa66ba68ad\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv" Dec 06 06:00:00 crc kubenswrapper[4706]: I1206 06:00:00.372665 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c32a94c6-6869-41d4-bc7a-c2aa66ba68ad-secret-volume\") pod \"collect-profiles-29416680-r67cv\" (UID: \"c32a94c6-6869-41d4-bc7a-c2aa66ba68ad\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv" Dec 06 06:00:00 crc kubenswrapper[4706]: I1206 06:00:00.372772 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rskr\" (UniqueName: \"kubernetes.io/projected/c32a94c6-6869-41d4-bc7a-c2aa66ba68ad-kube-api-access-5rskr\") pod \"collect-profiles-29416680-r67cv\" (UID: \"c32a94c6-6869-41d4-bc7a-c2aa66ba68ad\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv" Dec 06 06:00:00 crc kubenswrapper[4706]: I1206 06:00:00.372900 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c32a94c6-6869-41d4-bc7a-c2aa66ba68ad-config-volume\") pod 
\"collect-profiles-29416680-r67cv\" (UID: \"c32a94c6-6869-41d4-bc7a-c2aa66ba68ad\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv" Dec 06 06:00:00 crc kubenswrapper[4706]: I1206 06:00:00.373866 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c32a94c6-6869-41d4-bc7a-c2aa66ba68ad-config-volume\") pod \"collect-profiles-29416680-r67cv\" (UID: \"c32a94c6-6869-41d4-bc7a-c2aa66ba68ad\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv" Dec 06 06:00:00 crc kubenswrapper[4706]: I1206 06:00:00.389244 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c32a94c6-6869-41d4-bc7a-c2aa66ba68ad-secret-volume\") pod \"collect-profiles-29416680-r67cv\" (UID: \"c32a94c6-6869-41d4-bc7a-c2aa66ba68ad\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv" Dec 06 06:00:00 crc kubenswrapper[4706]: I1206 06:00:00.391930 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rskr\" (UniqueName: \"kubernetes.io/projected/c32a94c6-6869-41d4-bc7a-c2aa66ba68ad-kube-api-access-5rskr\") pod \"collect-profiles-29416680-r67cv\" (UID: \"c32a94c6-6869-41d4-bc7a-c2aa66ba68ad\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv" Dec 06 06:00:00 crc kubenswrapper[4706]: I1206 06:00:00.505214 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv" Dec 06 06:00:00 crc kubenswrapper[4706]: I1206 06:00:00.948523 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv"] Dec 06 06:00:01 crc kubenswrapper[4706]: I1206 06:00:01.785173 4706 generic.go:334] "Generic (PLEG): container finished" podID="c32a94c6-6869-41d4-bc7a-c2aa66ba68ad" containerID="ea5d1edebf731a92a660007b0d6e70a5086d41553d9871268b397ca23cfd2df5" exitCode=0 Dec 06 06:00:01 crc kubenswrapper[4706]: I1206 06:00:01.785609 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv" event={"ID":"c32a94c6-6869-41d4-bc7a-c2aa66ba68ad","Type":"ContainerDied","Data":"ea5d1edebf731a92a660007b0d6e70a5086d41553d9871268b397ca23cfd2df5"} Dec 06 06:00:01 crc kubenswrapper[4706]: I1206 06:00:01.786298 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv" event={"ID":"c32a94c6-6869-41d4-bc7a-c2aa66ba68ad","Type":"ContainerStarted","Data":"f8f659a2a5ae44d432c05d412af507b3a8f9894b0849d69136850402c1cf612e"} Dec 06 06:00:03 crc kubenswrapper[4706]: I1206 06:00:03.096840 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv" Dec 06 06:00:03 crc kubenswrapper[4706]: I1206 06:00:03.259581 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5rskr\" (UniqueName: \"kubernetes.io/projected/c32a94c6-6869-41d4-bc7a-c2aa66ba68ad-kube-api-access-5rskr\") pod \"c32a94c6-6869-41d4-bc7a-c2aa66ba68ad\" (UID: \"c32a94c6-6869-41d4-bc7a-c2aa66ba68ad\") " Dec 06 06:00:03 crc kubenswrapper[4706]: I1206 06:00:03.259869 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c32a94c6-6869-41d4-bc7a-c2aa66ba68ad-secret-volume\") pod \"c32a94c6-6869-41d4-bc7a-c2aa66ba68ad\" (UID: \"c32a94c6-6869-41d4-bc7a-c2aa66ba68ad\") " Dec 06 06:00:03 crc kubenswrapper[4706]: I1206 06:00:03.260177 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c32a94c6-6869-41d4-bc7a-c2aa66ba68ad-config-volume\") pod \"c32a94c6-6869-41d4-bc7a-c2aa66ba68ad\" (UID: \"c32a94c6-6869-41d4-bc7a-c2aa66ba68ad\") " Dec 06 06:00:03 crc kubenswrapper[4706]: I1206 06:00:03.261306 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c32a94c6-6869-41d4-bc7a-c2aa66ba68ad-config-volume" (OuterVolumeSpecName: "config-volume") pod "c32a94c6-6869-41d4-bc7a-c2aa66ba68ad" (UID: "c32a94c6-6869-41d4-bc7a-c2aa66ba68ad"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 06:00:03 crc kubenswrapper[4706]: I1206 06:00:03.265816 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c32a94c6-6869-41d4-bc7a-c2aa66ba68ad-kube-api-access-5rskr" (OuterVolumeSpecName: "kube-api-access-5rskr") pod "c32a94c6-6869-41d4-bc7a-c2aa66ba68ad" (UID: "c32a94c6-6869-41d4-bc7a-c2aa66ba68ad"). InnerVolumeSpecName "kube-api-access-5rskr". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:00:03 crc kubenswrapper[4706]: I1206 06:00:03.266035 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c32a94c6-6869-41d4-bc7a-c2aa66ba68ad-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "c32a94c6-6869-41d4-bc7a-c2aa66ba68ad" (UID: "c32a94c6-6869-41d4-bc7a-c2aa66ba68ad"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:00:03 crc kubenswrapper[4706]: I1206 06:00:03.362401 4706 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c32a94c6-6869-41d4-bc7a-c2aa66ba68ad-config-volume\") on node \"crc\" DevicePath \"\"" Dec 06 06:00:03 crc kubenswrapper[4706]: I1206 06:00:03.362436 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5rskr\" (UniqueName: \"kubernetes.io/projected/c32a94c6-6869-41d4-bc7a-c2aa66ba68ad-kube-api-access-5rskr\") on node \"crc\" DevicePath \"\"" Dec 06 06:00:03 crc kubenswrapper[4706]: I1206 06:00:03.362447 4706 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c32a94c6-6869-41d4-bc7a-c2aa66ba68ad-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 06 06:00:03 crc kubenswrapper[4706]: I1206 06:00:03.802503 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv" event={"ID":"c32a94c6-6869-41d4-bc7a-c2aa66ba68ad","Type":"ContainerDied","Data":"f8f659a2a5ae44d432c05d412af507b3a8f9894b0849d69136850402c1cf612e"} Dec 06 06:00:03 crc kubenswrapper[4706]: I1206 06:00:03.802545 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8f659a2a5ae44d432c05d412af507b3a8f9894b0849d69136850402c1cf612e" Dec 06 06:00:03 crc kubenswrapper[4706]: I1206 06:00:03.802553 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv" Dec 06 06:00:04 crc kubenswrapper[4706]: I1206 06:00:04.175032 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s"] Dec 06 06:00:04 crc kubenswrapper[4706]: I1206 06:00:04.182268 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416635-btv7s"] Dec 06 06:00:05 crc kubenswrapper[4706]: I1206 06:00:05.961433 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 06:00:05 crc kubenswrapper[4706]: I1206 06:00:05.961908 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 06:00:05 crc kubenswrapper[4706]: I1206 06:00:05.961955 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 06:00:05 crc kubenswrapper[4706]: I1206 06:00:05.962787 4706 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb"} pod="openshift-machine-config-operator/machine-config-daemon-z27rn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 06 06:00:05 crc kubenswrapper[4706]: I1206 06:00:05.962844 4706 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" containerID="cri-o://34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" gracePeriod=600 Dec 06 06:00:06 crc kubenswrapper[4706]: I1206 06:00:06.051977 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39728d8c-03c4-42d3-999d-1dfe014cfb34" path="/var/lib/kubelet/pods/39728d8c-03c4-42d3-999d-1dfe014cfb34/volumes" Dec 06 06:00:06 crc kubenswrapper[4706]: I1206 06:00:06.054713 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-j8txv"] Dec 06 06:00:06 crc kubenswrapper[4706]: I1206 06:00:06.059022 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-j8txv"] Dec 06 06:00:06 crc kubenswrapper[4706]: E1206 06:00:06.588385 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:00:06 crc kubenswrapper[4706]: I1206 06:00:06.834304 4706 generic.go:334] "Generic (PLEG): container finished" podID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" exitCode=0 Dec 06 06:00:06 crc kubenswrapper[4706]: I1206 06:00:06.834365 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerDied","Data":"34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb"} Dec 06 06:00:06 crc kubenswrapper[4706]: I1206 06:00:06.834449 4706 scope.go:117] "RemoveContainer" containerID="802a2997475c34e3468ebd06845af6e76e5a777088e134200058afb534cc75ab" Dec 06 06:00:06 crc kubenswrapper[4706]: I1206 06:00:06.835358 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:00:06 crc kubenswrapper[4706]: E1206 06:00:06.835784 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:00:08 crc kubenswrapper[4706]: I1206 06:00:08.051760 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7cced43-4fd9-4594-afd8-9c3e1ce7bb69" path="/var/lib/kubelet/pods/b7cced43-4fd9-4594-afd8-9c3e1ce7bb69/volumes" Dec 06 06:00:16 crc kubenswrapper[4706]: I1206 06:00:16.912533 4706 generic.go:334] "Generic (PLEG): container finished" podID="82ebe200-9dff-4f3b-8bf1-e1a6feee951c" containerID="d5d50b2acaf08475cf590b28f1a74d2fdbaa23f85ec839af5731d0bab4272918" exitCode=0 Dec 06 06:00:16 crc kubenswrapper[4706]: I1206 06:00:16.912620 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm" 
event={"ID":"82ebe200-9dff-4f3b-8bf1-e1a6feee951c","Type":"ContainerDied","Data":"d5d50b2acaf08475cf590b28f1a74d2fdbaa23f85ec839af5731d0bab4272918"} Dec 06 06:00:18 crc kubenswrapper[4706]: I1206 06:00:18.353388 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm" Dec 06 06:00:18 crc kubenswrapper[4706]: I1206 06:00:18.547251 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/82ebe200-9dff-4f3b-8bf1-e1a6feee951c-ssh-key\") pod \"82ebe200-9dff-4f3b-8bf1-e1a6feee951c\" (UID: \"82ebe200-9dff-4f3b-8bf1-e1a6feee951c\") " Dec 06 06:00:18 crc kubenswrapper[4706]: I1206 06:00:18.547302 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82ebe200-9dff-4f3b-8bf1-e1a6feee951c-inventory\") pod \"82ebe200-9dff-4f3b-8bf1-e1a6feee951c\" (UID: \"82ebe200-9dff-4f3b-8bf1-e1a6feee951c\") " Dec 06 06:00:18 crc kubenswrapper[4706]: I1206 06:00:18.547331 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nr62g\" (UniqueName: \"kubernetes.io/projected/82ebe200-9dff-4f3b-8bf1-e1a6feee951c-kube-api-access-nr62g\") pod \"82ebe200-9dff-4f3b-8bf1-e1a6feee951c\" (UID: \"82ebe200-9dff-4f3b-8bf1-e1a6feee951c\") " Dec 06 06:00:18 crc kubenswrapper[4706]: I1206 06:00:18.557287 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82ebe200-9dff-4f3b-8bf1-e1a6feee951c-kube-api-access-nr62g" (OuterVolumeSpecName: "kube-api-access-nr62g") pod "82ebe200-9dff-4f3b-8bf1-e1a6feee951c" (UID: "82ebe200-9dff-4f3b-8bf1-e1a6feee951c"). InnerVolumeSpecName "kube-api-access-nr62g". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:00:18 crc kubenswrapper[4706]: I1206 06:00:18.574523 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82ebe200-9dff-4f3b-8bf1-e1a6feee951c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "82ebe200-9dff-4f3b-8bf1-e1a6feee951c" (UID: "82ebe200-9dff-4f3b-8bf1-e1a6feee951c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:00:18 crc kubenswrapper[4706]: I1206 06:00:18.577247 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82ebe200-9dff-4f3b-8bf1-e1a6feee951c-inventory" (OuterVolumeSpecName: "inventory") pod "82ebe200-9dff-4f3b-8bf1-e1a6feee951c" (UID: "82ebe200-9dff-4f3b-8bf1-e1a6feee951c"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:00:18 crc kubenswrapper[4706]: I1206 06:00:18.649532 4706 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/82ebe200-9dff-4f3b-8bf1-e1a6feee951c-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 06 06:00:18 crc kubenswrapper[4706]: I1206 06:00:18.649560 4706 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/82ebe200-9dff-4f3b-8bf1-e1a6feee951c-inventory\") on node \"crc\" DevicePath \"\"" Dec 06 06:00:18 crc kubenswrapper[4706]: I1206 06:00:18.649571 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nr62g\" (UniqueName: \"kubernetes.io/projected/82ebe200-9dff-4f3b-8bf1-e1a6feee951c-kube-api-access-nr62g\") on node \"crc\" DevicePath \"\"" Dec 06 06:00:18 crc kubenswrapper[4706]: I1206 06:00:18.934371 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm" event={"ID":"82ebe200-9dff-4f3b-8bf1-e1a6feee951c","Type":"ContainerDied","Data":"661200582ca3b3111144d96b1b83d6652034f361238a7c5aec42d70ebb9466af"} Dec 06 06:00:18 crc kubenswrapper[4706]: I1206 06:00:18.934439 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-cpbpm" Dec 06 06:00:18 crc kubenswrapper[4706]: I1206 06:00:18.934450 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="661200582ca3b3111144d96b1b83d6652034f361238a7c5aec42d70ebb9466af" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.040202 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9"] Dec 06 06:00:19 crc kubenswrapper[4706]: E1206 06:00:19.040593 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c32a94c6-6869-41d4-bc7a-c2aa66ba68ad" containerName="collect-profiles" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.040609 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="c32a94c6-6869-41d4-bc7a-c2aa66ba68ad" containerName="collect-profiles" Dec 06 06:00:19 crc kubenswrapper[4706]: E1206 06:00:19.040643 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82ebe200-9dff-4f3b-8bf1-e1a6feee951c" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.040653 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="82ebe200-9dff-4f3b-8bf1-e1a6feee951c" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.040843 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="82ebe200-9dff-4f3b-8bf1-e1a6feee951c" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.040862 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="c32a94c6-6869-41d4-bc7a-c2aa66ba68ad" containerName="collect-profiles" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.041467 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.043929 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.043988 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.044087 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.045294 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9hwl" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.052443 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9"] Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.162540 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csz4m\" (UniqueName: \"kubernetes.io/projected/a71e1253-a40e-4b2b-b911-c15a88da2be5-kube-api-access-csz4m\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9\" (UID: \"a71e1253-a40e-4b2b-b911-c15a88da2be5\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.162651 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a71e1253-a40e-4b2b-b911-c15a88da2be5-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9\" (UID: \"a71e1253-a40e-4b2b-b911-c15a88da2be5\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.162788 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a71e1253-a40e-4b2b-b911-c15a88da2be5-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9\" (UID: \"a71e1253-a40e-4b2b-b911-c15a88da2be5\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.264702 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a71e1253-a40e-4b2b-b911-c15a88da2be5-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9\" (UID: \"a71e1253-a40e-4b2b-b911-c15a88da2be5\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.264858 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a71e1253-a40e-4b2b-b911-c15a88da2be5-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9\" (UID: \"a71e1253-a40e-4b2b-b911-c15a88da2be5\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.264946 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csz4m\" (UniqueName: \"kubernetes.io/projected/a71e1253-a40e-4b2b-b911-c15a88da2be5-kube-api-access-csz4m\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9\" 
(UID: \"a71e1253-a40e-4b2b-b911-c15a88da2be5\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.269846 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a71e1253-a40e-4b2b-b911-c15a88da2be5-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9\" (UID: \"a71e1253-a40e-4b2b-b911-c15a88da2be5\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.271478 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a71e1253-a40e-4b2b-b911-c15a88da2be5-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9\" (UID: \"a71e1253-a40e-4b2b-b911-c15a88da2be5\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.281690 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csz4m\" (UniqueName: \"kubernetes.io/projected/a71e1253-a40e-4b2b-b911-c15a88da2be5-kube-api-access-csz4m\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9\" (UID: \"a71e1253-a40e-4b2b-b911-c15a88da2be5\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.359871 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9" Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.703690 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9"] Dec 06 06:00:19 crc kubenswrapper[4706]: W1206 06:00:19.708428 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda71e1253_a40e_4b2b_b911_c15a88da2be5.slice/crio-7525e5594338ab8f1e1347744409d5e177846ab80c479a286b8eeae931a30579 WatchSource:0}: Error finding container 7525e5594338ab8f1e1347744409d5e177846ab80c479a286b8eeae931a30579: Status 404 returned error can't find the container with id 7525e5594338ab8f1e1347744409d5e177846ab80c479a286b8eeae931a30579 Dec 06 06:00:19 crc kubenswrapper[4706]: I1206 06:00:19.942321 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9" event={"ID":"a71e1253-a40e-4b2b-b911-c15a88da2be5","Type":"ContainerStarted","Data":"7525e5594338ab8f1e1347744409d5e177846ab80c479a286b8eeae931a30579"} Dec 06 06:00:20 crc kubenswrapper[4706]: I1206 06:00:20.039633 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:00:20 crc kubenswrapper[4706]: E1206 06:00:20.039944 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:00:21 crc kubenswrapper[4706]: I1206 06:00:21.958534 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9" 
event={"ID":"a71e1253-a40e-4b2b-b911-c15a88da2be5","Type":"ContainerStarted","Data":"0e3cb29edf621147ea0ecc1007146c4bb575333c3d7f4e8eb9fc9491e225e10b"} Dec 06 06:00:21 crc kubenswrapper[4706]: I1206 06:00:21.983668 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9" podStartSLOduration=1.357256443 podStartE2EDuration="2.983648684s" podCreationTimestamp="2025-12-06 06:00:19 +0000 UTC" firstStartedPulling="2025-12-06 06:00:19.710726402 +0000 UTC m=+2442.038550356" lastFinishedPulling="2025-12-06 06:00:21.337118653 +0000 UTC m=+2443.664942597" observedRunningTime="2025-12-06 06:00:21.978192997 +0000 UTC m=+2444.306016961" watchObservedRunningTime="2025-12-06 06:00:21.983648684 +0000 UTC m=+2444.311472638" Dec 06 06:00:33 crc kubenswrapper[4706]: I1206 06:00:33.036458 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:00:33 crc kubenswrapper[4706]: E1206 06:00:33.037221 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:00:44 crc kubenswrapper[4706]: I1206 06:00:44.036838 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:00:44 crc kubenswrapper[4706]: E1206 06:00:44.037802 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:00:48 crc kubenswrapper[4706]: I1206 06:00:48.497802 4706 scope.go:117] "RemoveContainer" containerID="b28219acfb02a2c4e23ef378f9824558b3ae988d58297dac54f350c94efd63db" Dec 06 06:00:48 crc kubenswrapper[4706]: I1206 06:00:48.541631 4706 scope.go:117] "RemoveContainer" containerID="72a35fd1caa4dd68f2228a6c426ff2fc121cea0a9cf2c8382b55c998a241e913" Dec 06 06:00:58 crc kubenswrapper[4706]: I1206 06:00:58.043304 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:00:58 crc kubenswrapper[4706]: E1206 06:00:58.045680 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:01:00 crc kubenswrapper[4706]: I1206 06:01:00.151990 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29416681-gqk5x"] Dec 06 06:01:00 crc kubenswrapper[4706]: I1206 06:01:00.153411 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29416681-gqk5x" Dec 06 06:01:00 crc kubenswrapper[4706]: I1206 06:01:00.163560 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29416681-gqk5x"] Dec 06 06:01:00 crc kubenswrapper[4706]: I1206 06:01:00.308327 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/32439274-bc88-4aa9-b040-98212cda2b38-fernet-keys\") pod \"keystone-cron-29416681-gqk5x\" (UID: \"32439274-bc88-4aa9-b040-98212cda2b38\") " pod="openstack/keystone-cron-29416681-gqk5x" Dec 06 06:01:00 crc kubenswrapper[4706]: I1206 06:01:00.308446 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32439274-bc88-4aa9-b040-98212cda2b38-combined-ca-bundle\") pod \"keystone-cron-29416681-gqk5x\" (UID: \"32439274-bc88-4aa9-b040-98212cda2b38\") " pod="openstack/keystone-cron-29416681-gqk5x" Dec 06 06:01:00 crc kubenswrapper[4706]: I1206 06:01:00.308476 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67qzp\" (UniqueName: \"kubernetes.io/projected/32439274-bc88-4aa9-b040-98212cda2b38-kube-api-access-67qzp\") pod \"keystone-cron-29416681-gqk5x\" (UID: \"32439274-bc88-4aa9-b040-98212cda2b38\") " pod="openstack/keystone-cron-29416681-gqk5x" Dec 06 06:01:00 crc kubenswrapper[4706]: I1206 06:01:00.308512 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32439274-bc88-4aa9-b040-98212cda2b38-config-data\") pod \"keystone-cron-29416681-gqk5x\" (UID: \"32439274-bc88-4aa9-b040-98212cda2b38\") " pod="openstack/keystone-cron-29416681-gqk5x" Dec 06 06:01:00 crc kubenswrapper[4706]: I1206 06:01:00.409572 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32439274-bc88-4aa9-b040-98212cda2b38-combined-ca-bundle\") pod \"keystone-cron-29416681-gqk5x\" (UID: \"32439274-bc88-4aa9-b040-98212cda2b38\") " pod="openstack/keystone-cron-29416681-gqk5x" Dec 06 06:01:00 crc kubenswrapper[4706]: I1206 06:01:00.409967 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67qzp\" (UniqueName: \"kubernetes.io/projected/32439274-bc88-4aa9-b040-98212cda2b38-kube-api-access-67qzp\") pod \"keystone-cron-29416681-gqk5x\" (UID: \"32439274-bc88-4aa9-b040-98212cda2b38\") " pod="openstack/keystone-cron-29416681-gqk5x" Dec 06 06:01:00 crc kubenswrapper[4706]: I1206 06:01:00.410017 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32439274-bc88-4aa9-b040-98212cda2b38-config-data\") pod \"keystone-cron-29416681-gqk5x\" (UID: \"32439274-bc88-4aa9-b040-98212cda2b38\") " pod="openstack/keystone-cron-29416681-gqk5x" Dec 06 06:01:00 crc kubenswrapper[4706]: I1206 06:01:00.410107 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/32439274-bc88-4aa9-b040-98212cda2b38-fernet-keys\") pod \"keystone-cron-29416681-gqk5x\" (UID: \"32439274-bc88-4aa9-b040-98212cda2b38\") " pod="openstack/keystone-cron-29416681-gqk5x" Dec 06 06:01:00 crc kubenswrapper[4706]: I1206 06:01:00.417485 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32439274-bc88-4aa9-b040-98212cda2b38-combined-ca-bundle\") pod \"keystone-cron-29416681-gqk5x\" (UID: \"32439274-bc88-4aa9-b040-98212cda2b38\") " pod="openstack/keystone-cron-29416681-gqk5x" Dec 06 06:01:00 crc kubenswrapper[4706]: I1206 06:01:00.418228 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/32439274-bc88-4aa9-b040-98212cda2b38-fernet-keys\") pod \"keystone-cron-29416681-gqk5x\" (UID: \"32439274-bc88-4aa9-b040-98212cda2b38\") " pod="openstack/keystone-cron-29416681-gqk5x" Dec 06 06:01:00 crc kubenswrapper[4706]: I1206 06:01:00.419641 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32439274-bc88-4aa9-b040-98212cda2b38-config-data\") pod \"keystone-cron-29416681-gqk5x\" (UID: \"32439274-bc88-4aa9-b040-98212cda2b38\") " pod="openstack/keystone-cron-29416681-gqk5x" Dec 06 06:01:00 crc kubenswrapper[4706]: I1206 06:01:00.428672 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67qzp\" (UniqueName: \"kubernetes.io/projected/32439274-bc88-4aa9-b040-98212cda2b38-kube-api-access-67qzp\") pod \"keystone-cron-29416681-gqk5x\" (UID: \"32439274-bc88-4aa9-b040-98212cda2b38\") " pod="openstack/keystone-cron-29416681-gqk5x" Dec 06 06:01:00 crc kubenswrapper[4706]: I1206 06:01:00.522798 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29416681-gqk5x" Dec 06 06:01:00 crc kubenswrapper[4706]: W1206 06:01:00.970370 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod32439274_bc88_4aa9_b040_98212cda2b38.slice/crio-5ccf95cd835fdf7cd91728243089ced3c255d25157d8831c6675e309ef5df8ce WatchSource:0}: Error finding container 5ccf95cd835fdf7cd91728243089ced3c255d25157d8831c6675e309ef5df8ce: Status 404 returned error can't find the container with id 5ccf95cd835fdf7cd91728243089ced3c255d25157d8831c6675e309ef5df8ce Dec 06 06:01:00 crc kubenswrapper[4706]: I1206 06:01:00.972566 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29416681-gqk5x"] Dec 06 06:01:01 crc kubenswrapper[4706]: I1206 06:01:01.347576 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29416681-gqk5x" event={"ID":"32439274-bc88-4aa9-b040-98212cda2b38","Type":"ContainerStarted","Data":"9ccfea82dbc89972e3027eb20a2a8c4f71af8168a814e38072d4b719202b6dde"} Dec 06 06:01:01 crc kubenswrapper[4706]: I1206 06:01:01.347944 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29416681-gqk5x" event={"ID":"32439274-bc88-4aa9-b040-98212cda2b38","Type":"ContainerStarted","Data":"5ccf95cd835fdf7cd91728243089ced3c255d25157d8831c6675e309ef5df8ce"} Dec 06 06:01:01 crc kubenswrapper[4706]: I1206 06:01:01.381133 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29416681-gqk5x" podStartSLOduration=1.381111425 podStartE2EDuration="1.381111425s" podCreationTimestamp="2025-12-06 06:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 06:01:01.371516426 +0000 UTC m=+2483.699340380" watchObservedRunningTime="2025-12-06 06:01:01.381111425 +0000 UTC m=+2483.708935369" Dec 06 06:01:03 crc kubenswrapper[4706]: I1206 06:01:03.368578 4706 
generic.go:334] "Generic (PLEG): container finished" podID="32439274-bc88-4aa9-b040-98212cda2b38" containerID="9ccfea82dbc89972e3027eb20a2a8c4f71af8168a814e38072d4b719202b6dde" exitCode=0 Dec 06 06:01:03 crc kubenswrapper[4706]: I1206 06:01:03.368661 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29416681-gqk5x" event={"ID":"32439274-bc88-4aa9-b040-98212cda2b38","Type":"ContainerDied","Data":"9ccfea82dbc89972e3027eb20a2a8c4f71af8168a814e38072d4b719202b6dde"} Dec 06 06:01:04 crc kubenswrapper[4706]: I1206 06:01:04.722114 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29416681-gqk5x" Dec 06 06:01:04 crc kubenswrapper[4706]: I1206 06:01:04.795830 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32439274-bc88-4aa9-b040-98212cda2b38-combined-ca-bundle\") pod \"32439274-bc88-4aa9-b040-98212cda2b38\" (UID: \"32439274-bc88-4aa9-b040-98212cda2b38\") " Dec 06 06:01:04 crc kubenswrapper[4706]: I1206 06:01:04.795932 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/32439274-bc88-4aa9-b040-98212cda2b38-fernet-keys\") pod \"32439274-bc88-4aa9-b040-98212cda2b38\" (UID: \"32439274-bc88-4aa9-b040-98212cda2b38\") " Dec 06 06:01:04 crc kubenswrapper[4706]: I1206 06:01:04.795956 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-67qzp\" (UniqueName: \"kubernetes.io/projected/32439274-bc88-4aa9-b040-98212cda2b38-kube-api-access-67qzp\") pod \"32439274-bc88-4aa9-b040-98212cda2b38\" (UID: \"32439274-bc88-4aa9-b040-98212cda2b38\") " Dec 06 06:01:04 crc kubenswrapper[4706]: I1206 06:01:04.796122 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32439274-bc88-4aa9-b040-98212cda2b38-config-data\") pod \"32439274-bc88-4aa9-b040-98212cda2b38\" (UID: \"32439274-bc88-4aa9-b040-98212cda2b38\") " Dec 06 06:01:04 crc kubenswrapper[4706]: I1206 06:01:04.802097 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32439274-bc88-4aa9-b040-98212cda2b38-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "32439274-bc88-4aa9-b040-98212cda2b38" (UID: "32439274-bc88-4aa9-b040-98212cda2b38"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:01:04 crc kubenswrapper[4706]: I1206 06:01:04.802380 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32439274-bc88-4aa9-b040-98212cda2b38-kube-api-access-67qzp" (OuterVolumeSpecName: "kube-api-access-67qzp") pod "32439274-bc88-4aa9-b040-98212cda2b38" (UID: "32439274-bc88-4aa9-b040-98212cda2b38"). InnerVolumeSpecName "kube-api-access-67qzp". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:01:04 crc kubenswrapper[4706]: I1206 06:01:04.824128 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32439274-bc88-4aa9-b040-98212cda2b38-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "32439274-bc88-4aa9-b040-98212cda2b38" (UID: "32439274-bc88-4aa9-b040-98212cda2b38"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:01:04 crc kubenswrapper[4706]: I1206 06:01:04.846844 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/32439274-bc88-4aa9-b040-98212cda2b38-config-data" (OuterVolumeSpecName: "config-data") pod "32439274-bc88-4aa9-b040-98212cda2b38" (UID: "32439274-bc88-4aa9-b040-98212cda2b38"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:01:04 crc kubenswrapper[4706]: I1206 06:01:04.897983 4706 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/32439274-bc88-4aa9-b040-98212cda2b38-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 06:01:04 crc kubenswrapper[4706]: I1206 06:01:04.898300 4706 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/32439274-bc88-4aa9-b040-98212cda2b38-fernet-keys\") on node \"crc\" DevicePath \"\"" Dec 06 06:01:04 crc kubenswrapper[4706]: I1206 06:01:04.898310 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-67qzp\" (UniqueName: \"kubernetes.io/projected/32439274-bc88-4aa9-b040-98212cda2b38-kube-api-access-67qzp\") on node \"crc\" DevicePath \"\"" Dec 06 06:01:04 crc kubenswrapper[4706]: I1206 06:01:04.898321 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/32439274-bc88-4aa9-b040-98212cda2b38-config-data\") on node \"crc\" DevicePath \"\"" Dec 06 06:01:05 crc kubenswrapper[4706]: I1206 06:01:05.393516 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29416681-gqk5x" event={"ID":"32439274-bc88-4aa9-b040-98212cda2b38","Type":"ContainerDied","Data":"5ccf95cd835fdf7cd91728243089ced3c255d25157d8831c6675e309ef5df8ce"} Dec 06 06:01:05 crc kubenswrapper[4706]: I1206 06:01:05.393566 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5ccf95cd835fdf7cd91728243089ced3c255d25157d8831c6675e309ef5df8ce" Dec 06 06:01:05 crc kubenswrapper[4706]: I1206 06:01:05.393656 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29416681-gqk5x" Dec 06 06:01:09 crc kubenswrapper[4706]: I1206 06:01:09.427955 4706 generic.go:334] "Generic (PLEG): container finished" podID="a71e1253-a40e-4b2b-b911-c15a88da2be5" containerID="0e3cb29edf621147ea0ecc1007146c4bb575333c3d7f4e8eb9fc9491e225e10b" exitCode=0 Dec 06 06:01:09 crc kubenswrapper[4706]: I1206 06:01:09.428069 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9" event={"ID":"a71e1253-a40e-4b2b-b911-c15a88da2be5","Type":"ContainerDied","Data":"0e3cb29edf621147ea0ecc1007146c4bb575333c3d7f4e8eb9fc9491e225e10b"} Dec 06 06:01:10 crc kubenswrapper[4706]: I1206 06:01:10.816682 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9" Dec 06 06:01:10 crc kubenswrapper[4706]: I1206 06:01:10.926405 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a71e1253-a40e-4b2b-b911-c15a88da2be5-inventory\") pod \"a71e1253-a40e-4b2b-b911-c15a88da2be5\" (UID: \"a71e1253-a40e-4b2b-b911-c15a88da2be5\") " Dec 06 06:01:10 crc kubenswrapper[4706]: I1206 06:01:10.926519 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a71e1253-a40e-4b2b-b911-c15a88da2be5-ssh-key\") pod \"a71e1253-a40e-4b2b-b911-c15a88da2be5\" (UID: \"a71e1253-a40e-4b2b-b911-c15a88da2be5\") " Dec 06 06:01:10 crc kubenswrapper[4706]: I1206 06:01:10.926570 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-csz4m\" (UniqueName: \"kubernetes.io/projected/a71e1253-a40e-4b2b-b911-c15a88da2be5-kube-api-access-csz4m\") pod \"a71e1253-a40e-4b2b-b911-c15a88da2be5\" (UID: \"a71e1253-a40e-4b2b-b911-c15a88da2be5\") " Dec 06 06:01:10 crc kubenswrapper[4706]: I1206 06:01:10.931711 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a71e1253-a40e-4b2b-b911-c15a88da2be5-kube-api-access-csz4m" (OuterVolumeSpecName: "kube-api-access-csz4m") pod "a71e1253-a40e-4b2b-b911-c15a88da2be5" (UID: "a71e1253-a40e-4b2b-b911-c15a88da2be5"). InnerVolumeSpecName "kube-api-access-csz4m". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:01:10 crc kubenswrapper[4706]: I1206 06:01:10.953880 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a71e1253-a40e-4b2b-b911-c15a88da2be5-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a71e1253-a40e-4b2b-b911-c15a88da2be5" (UID: "a71e1253-a40e-4b2b-b911-c15a88da2be5"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:01:10 crc kubenswrapper[4706]: I1206 06:01:10.954211 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a71e1253-a40e-4b2b-b911-c15a88da2be5-inventory" (OuterVolumeSpecName: "inventory") pod "a71e1253-a40e-4b2b-b911-c15a88da2be5" (UID: "a71e1253-a40e-4b2b-b911-c15a88da2be5"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.029100 4706 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a71e1253-a40e-4b2b-b911-c15a88da2be5-inventory\") on node \"crc\" DevicePath \"\"" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.029135 4706 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a71e1253-a40e-4b2b-b911-c15a88da2be5-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.029149 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-csz4m\" (UniqueName: \"kubernetes.io/projected/a71e1253-a40e-4b2b-b911-c15a88da2be5-kube-api-access-csz4m\") on node \"crc\" DevicePath \"\"" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.446374 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9" event={"ID":"a71e1253-a40e-4b2b-b911-c15a88da2be5","Type":"ContainerDied","Data":"7525e5594338ab8f1e1347744409d5e177846ab80c479a286b8eeae931a30579"} Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.446589 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7525e5594338ab8f1e1347744409d5e177846ab80c479a286b8eeae931a30579" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.446493 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.523840 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-gf4l9"] Dec 06 06:01:11 crc kubenswrapper[4706]: E1206 06:01:11.524247 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32439274-bc88-4aa9-b040-98212cda2b38" containerName="keystone-cron" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.524265 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="32439274-bc88-4aa9-b040-98212cda2b38" containerName="keystone-cron" Dec 06 06:01:11 crc kubenswrapper[4706]: E1206 06:01:11.524323 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a71e1253-a40e-4b2b-b911-c15a88da2be5" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.524331 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a71e1253-a40e-4b2b-b911-c15a88da2be5" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.524559 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="32439274-bc88-4aa9-b040-98212cda2b38" containerName="keystone-cron" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.524577 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a71e1253-a40e-4b2b-b911-c15a88da2be5" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.525310 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-gf4l9" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.527031 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.527097 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.527362 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9hwl" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.528535 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.536609 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-gf4l9"] Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.639506 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-665bq\" (UniqueName: \"kubernetes.io/projected/fe026aef-fa96-451a-b38d-de4406116ea7-kube-api-access-665bq\") pod \"ssh-known-hosts-edpm-deployment-gf4l9\" (UID: \"fe026aef-fa96-451a-b38d-de4406116ea7\") " pod="openstack/ssh-known-hosts-edpm-deployment-gf4l9" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.639732 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fe026aef-fa96-451a-b38d-de4406116ea7-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-gf4l9\" (UID: \"fe026aef-fa96-451a-b38d-de4406116ea7\") " pod="openstack/ssh-known-hosts-edpm-deployment-gf4l9" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.639766 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/fe026aef-fa96-451a-b38d-de4406116ea7-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-gf4l9\" (UID: \"fe026aef-fa96-451a-b38d-de4406116ea7\") " pod="openstack/ssh-known-hosts-edpm-deployment-gf4l9" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.740991 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fe026aef-fa96-451a-b38d-de4406116ea7-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-gf4l9\" (UID: \"fe026aef-fa96-451a-b38d-de4406116ea7\") " pod="openstack/ssh-known-hosts-edpm-deployment-gf4l9" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.741038 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/fe026aef-fa96-451a-b38d-de4406116ea7-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-gf4l9\" (UID: \"fe026aef-fa96-451a-b38d-de4406116ea7\") " pod="openstack/ssh-known-hosts-edpm-deployment-gf4l9" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.741092 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-665bq\" (UniqueName: \"kubernetes.io/projected/fe026aef-fa96-451a-b38d-de4406116ea7-kube-api-access-665bq\") pod \"ssh-known-hosts-edpm-deployment-gf4l9\" (UID: \"fe026aef-fa96-451a-b38d-de4406116ea7\") " pod="openstack/ssh-known-hosts-edpm-deployment-gf4l9" Dec 06 06:01:11 crc 
kubenswrapper[4706]: I1206 06:01:11.745004 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/fe026aef-fa96-451a-b38d-de4406116ea7-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-gf4l9\" (UID: \"fe026aef-fa96-451a-b38d-de4406116ea7\") " pod="openstack/ssh-known-hosts-edpm-deployment-gf4l9" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.745850 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fe026aef-fa96-451a-b38d-de4406116ea7-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-gf4l9\" (UID: \"fe026aef-fa96-451a-b38d-de4406116ea7\") " pod="openstack/ssh-known-hosts-edpm-deployment-gf4l9" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.757475 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-665bq\" (UniqueName: \"kubernetes.io/projected/fe026aef-fa96-451a-b38d-de4406116ea7-kube-api-access-665bq\") pod \"ssh-known-hosts-edpm-deployment-gf4l9\" (UID: \"fe026aef-fa96-451a-b38d-de4406116ea7\") " pod="openstack/ssh-known-hosts-edpm-deployment-gf4l9" Dec 06 06:01:11 crc kubenswrapper[4706]: I1206 06:01:11.842248 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-gf4l9" Dec 06 06:01:12 crc kubenswrapper[4706]: I1206 06:01:12.360525 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-gf4l9"] Dec 06 06:01:12 crc kubenswrapper[4706]: I1206 06:01:12.457126 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-gf4l9" event={"ID":"fe026aef-fa96-451a-b38d-de4406116ea7","Type":"ContainerStarted","Data":"1e6a5e19ed7e6dba5d736a5a9c778b5c204abc3b072553234760a7ec91719fc8"} Dec 06 06:01:13 crc kubenswrapper[4706]: I1206 06:01:13.036446 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:01:13 crc kubenswrapper[4706]: E1206 06:01:13.036944 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:01:13 crc kubenswrapper[4706]: I1206 06:01:13.468363 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-gf4l9" event={"ID":"fe026aef-fa96-451a-b38d-de4406116ea7","Type":"ContainerStarted","Data":"ac3965b6ba82eda7b4b25ea26558915e84968ee202b4ed79f9bc823019933096"} Dec 06 06:01:13 crc kubenswrapper[4706]: I1206 06:01:13.492455 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-gf4l9" podStartSLOduration=2.268522248 podStartE2EDuration="2.492438356s" podCreationTimestamp="2025-12-06 06:01:11 +0000 UTC" firstStartedPulling="2025-12-06 06:01:12.367893932 +0000 UTC m=+2494.695717876" lastFinishedPulling="2025-12-06 06:01:12.59181004 +0000 UTC m=+2494.919633984" observedRunningTime="2025-12-06 06:01:13.486492245 +0000 UTC m=+2495.814316189" watchObservedRunningTime="2025-12-06 06:01:13.492438356 +0000 UTC m=+2495.820262300" Dec 06 06:01:19 crc 
kubenswrapper[4706]: I1206 06:01:19.521523 4706 generic.go:334] "Generic (PLEG): container finished" podID="fe026aef-fa96-451a-b38d-de4406116ea7" containerID="ac3965b6ba82eda7b4b25ea26558915e84968ee202b4ed79f9bc823019933096" exitCode=0 Dec 06 06:01:19 crc kubenswrapper[4706]: I1206 06:01:19.521614 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-gf4l9" event={"ID":"fe026aef-fa96-451a-b38d-de4406116ea7","Type":"ContainerDied","Data":"ac3965b6ba82eda7b4b25ea26558915e84968ee202b4ed79f9bc823019933096"} Dec 06 06:01:20 crc kubenswrapper[4706]: I1206 06:01:20.971412 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-gf4l9" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.021861 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-665bq\" (UniqueName: \"kubernetes.io/projected/fe026aef-fa96-451a-b38d-de4406116ea7-kube-api-access-665bq\") pod \"fe026aef-fa96-451a-b38d-de4406116ea7\" (UID: \"fe026aef-fa96-451a-b38d-de4406116ea7\") " Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.022104 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/fe026aef-fa96-451a-b38d-de4406116ea7-inventory-0\") pod \"fe026aef-fa96-451a-b38d-de4406116ea7\" (UID: \"fe026aef-fa96-451a-b38d-de4406116ea7\") " Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.022326 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fe026aef-fa96-451a-b38d-de4406116ea7-ssh-key-openstack-edpm-ipam\") pod \"fe026aef-fa96-451a-b38d-de4406116ea7\" (UID: \"fe026aef-fa96-451a-b38d-de4406116ea7\") " Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.029473 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe026aef-fa96-451a-b38d-de4406116ea7-kube-api-access-665bq" (OuterVolumeSpecName: "kube-api-access-665bq") pod "fe026aef-fa96-451a-b38d-de4406116ea7" (UID: "fe026aef-fa96-451a-b38d-de4406116ea7"). InnerVolumeSpecName "kube-api-access-665bq". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.054996 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe026aef-fa96-451a-b38d-de4406116ea7-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "fe026aef-fa96-451a-b38d-de4406116ea7" (UID: "fe026aef-fa96-451a-b38d-de4406116ea7"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.055888 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe026aef-fa96-451a-b38d-de4406116ea7-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "fe026aef-fa96-451a-b38d-de4406116ea7" (UID: "fe026aef-fa96-451a-b38d-de4406116ea7"). InnerVolumeSpecName "inventory-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.124204 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-665bq\" (UniqueName: \"kubernetes.io/projected/fe026aef-fa96-451a-b38d-de4406116ea7-kube-api-access-665bq\") on node \"crc\" DevicePath \"\"" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.124249 4706 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/fe026aef-fa96-451a-b38d-de4406116ea7-inventory-0\") on node \"crc\" DevicePath \"\"" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.124264 4706 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/fe026aef-fa96-451a-b38d-de4406116ea7-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.541037 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-gf4l9" event={"ID":"fe026aef-fa96-451a-b38d-de4406116ea7","Type":"ContainerDied","Data":"1e6a5e19ed7e6dba5d736a5a9c778b5c204abc3b072553234760a7ec91719fc8"} Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.541399 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1e6a5e19ed7e6dba5d736a5a9c778b5c204abc3b072553234760a7ec91719fc8" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.541141 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-gf4l9" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.613282 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j"] Dec 06 06:01:21 crc kubenswrapper[4706]: E1206 06:01:21.613709 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe026aef-fa96-451a-b38d-de4406116ea7" containerName="ssh-known-hosts-edpm-deployment" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.613727 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe026aef-fa96-451a-b38d-de4406116ea7" containerName="ssh-known-hosts-edpm-deployment" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.613900 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe026aef-fa96-451a-b38d-de4406116ea7" containerName="ssh-known-hosts-edpm-deployment" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.614521 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.616545 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.616668 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.616715 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9hwl" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.618829 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.626370 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j"] Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.761137 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/70676b1a-d6a7-4b05-b15a-fa2661a1a77b-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-6bt4j\" (UID: \"70676b1a-d6a7-4b05-b15a-fa2661a1a77b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.761191 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cn62\" (UniqueName: \"kubernetes.io/projected/70676b1a-d6a7-4b05-b15a-fa2661a1a77b-kube-api-access-8cn62\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-6bt4j\" (UID: \"70676b1a-d6a7-4b05-b15a-fa2661a1a77b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.761276 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/70676b1a-d6a7-4b05-b15a-fa2661a1a77b-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-6bt4j\" (UID: \"70676b1a-d6a7-4b05-b15a-fa2661a1a77b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.862842 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/70676b1a-d6a7-4b05-b15a-fa2661a1a77b-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-6bt4j\" (UID: \"70676b1a-d6a7-4b05-b15a-fa2661a1a77b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.863293 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/70676b1a-d6a7-4b05-b15a-fa2661a1a77b-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-6bt4j\" (UID: \"70676b1a-d6a7-4b05-b15a-fa2661a1a77b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.863419 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cn62\" (UniqueName: \"kubernetes.io/projected/70676b1a-d6a7-4b05-b15a-fa2661a1a77b-kube-api-access-8cn62\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-6bt4j\" (UID: \"70676b1a-d6a7-4b05-b15a-fa2661a1a77b\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.867242 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/70676b1a-d6a7-4b05-b15a-fa2661a1a77b-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-6bt4j\" (UID: \"70676b1a-d6a7-4b05-b15a-fa2661a1a77b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.872622 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/70676b1a-d6a7-4b05-b15a-fa2661a1a77b-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-6bt4j\" (UID: \"70676b1a-d6a7-4b05-b15a-fa2661a1a77b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.880019 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cn62\" (UniqueName: \"kubernetes.io/projected/70676b1a-d6a7-4b05-b15a-fa2661a1a77b-kube-api-access-8cn62\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-6bt4j\" (UID: \"70676b1a-d6a7-4b05-b15a-fa2661a1a77b\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j" Dec 06 06:01:21 crc kubenswrapper[4706]: I1206 06:01:21.970514 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j" Dec 06 06:01:22 crc kubenswrapper[4706]: I1206 06:01:22.463144 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j"] Dec 06 06:01:22 crc kubenswrapper[4706]: I1206 06:01:22.550829 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j" event={"ID":"70676b1a-d6a7-4b05-b15a-fa2661a1a77b","Type":"ContainerStarted","Data":"6d7aa5ab3a41d08f8c77b19aeff63e92d16e3a42881d126800a16f3d3baff52d"} Dec 06 06:01:23 crc kubenswrapper[4706]: I1206 06:01:23.562110 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j" event={"ID":"70676b1a-d6a7-4b05-b15a-fa2661a1a77b","Type":"ContainerStarted","Data":"b23ea6f8914e0459e63c7ae45b3c03f9dbc4a296ac6aa8f36eb74d6e84c82aa2"} Dec 06 06:01:23 crc kubenswrapper[4706]: I1206 06:01:23.592788 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j" podStartSLOduration=2.400105838 podStartE2EDuration="2.59276915s" podCreationTimestamp="2025-12-06 06:01:21 +0000 UTC" firstStartedPulling="2025-12-06 06:01:22.471332851 +0000 UTC m=+2504.799156795" lastFinishedPulling="2025-12-06 06:01:22.663996163 +0000 UTC m=+2504.991820107" observedRunningTime="2025-12-06 06:01:23.592342239 +0000 UTC m=+2505.920166203" watchObservedRunningTime="2025-12-06 06:01:23.59276915 +0000 UTC m=+2505.920593094" Dec 06 06:01:24 crc kubenswrapper[4706]: I1206 06:01:24.040540 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:01:24 crc kubenswrapper[4706]: E1206 06:01:24.040842 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:01:30 crc kubenswrapper[4706]: I1206 06:01:30.615822 4706 generic.go:334] "Generic (PLEG): container finished" podID="70676b1a-d6a7-4b05-b15a-fa2661a1a77b" containerID="b23ea6f8914e0459e63c7ae45b3c03f9dbc4a296ac6aa8f36eb74d6e84c82aa2" exitCode=0 Dec 06 06:01:30 crc kubenswrapper[4706]: I1206 06:01:30.615907 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j" event={"ID":"70676b1a-d6a7-4b05-b15a-fa2661a1a77b","Type":"ContainerDied","Data":"b23ea6f8914e0459e63c7ae45b3c03f9dbc4a296ac6aa8f36eb74d6e84c82aa2"} Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.037254 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.180305 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/70676b1a-d6a7-4b05-b15a-fa2661a1a77b-inventory\") pod \"70676b1a-d6a7-4b05-b15a-fa2661a1a77b\" (UID: \"70676b1a-d6a7-4b05-b15a-fa2661a1a77b\") " Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.180488 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8cn62\" (UniqueName: \"kubernetes.io/projected/70676b1a-d6a7-4b05-b15a-fa2661a1a77b-kube-api-access-8cn62\") pod \"70676b1a-d6a7-4b05-b15a-fa2661a1a77b\" (UID: \"70676b1a-d6a7-4b05-b15a-fa2661a1a77b\") " Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.181353 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/70676b1a-d6a7-4b05-b15a-fa2661a1a77b-ssh-key\") pod \"70676b1a-d6a7-4b05-b15a-fa2661a1a77b\" (UID: \"70676b1a-d6a7-4b05-b15a-fa2661a1a77b\") " Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.187371 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70676b1a-d6a7-4b05-b15a-fa2661a1a77b-kube-api-access-8cn62" (OuterVolumeSpecName: "kube-api-access-8cn62") pod "70676b1a-d6a7-4b05-b15a-fa2661a1a77b" (UID: "70676b1a-d6a7-4b05-b15a-fa2661a1a77b"). InnerVolumeSpecName "kube-api-access-8cn62". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.209734 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70676b1a-d6a7-4b05-b15a-fa2661a1a77b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "70676b1a-d6a7-4b05-b15a-fa2661a1a77b" (UID: "70676b1a-d6a7-4b05-b15a-fa2661a1a77b"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.213525 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/70676b1a-d6a7-4b05-b15a-fa2661a1a77b-inventory" (OuterVolumeSpecName: "inventory") pod "70676b1a-d6a7-4b05-b15a-fa2661a1a77b" (UID: "70676b1a-d6a7-4b05-b15a-fa2661a1a77b"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.285646 4706 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/70676b1a-d6a7-4b05-b15a-fa2661a1a77b-inventory\") on node \"crc\" DevicePath \"\"" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.285688 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8cn62\" (UniqueName: \"kubernetes.io/projected/70676b1a-d6a7-4b05-b15a-fa2661a1a77b-kube-api-access-8cn62\") on node \"crc\" DevicePath \"\"" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.285701 4706 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/70676b1a-d6a7-4b05-b15a-fa2661a1a77b-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.632689 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j" event={"ID":"70676b1a-d6a7-4b05-b15a-fa2661a1a77b","Type":"ContainerDied","Data":"6d7aa5ab3a41d08f8c77b19aeff63e92d16e3a42881d126800a16f3d3baff52d"} Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.632729 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6d7aa5ab3a41d08f8c77b19aeff63e92d16e3a42881d126800a16f3d3baff52d" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.633035 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-6bt4j" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.709408 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5"] Dec 06 06:01:32 crc kubenswrapper[4706]: E1206 06:01:32.710819 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70676b1a-d6a7-4b05-b15a-fa2661a1a77b" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.710844 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="70676b1a-d6a7-4b05-b15a-fa2661a1a77b" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.711025 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="70676b1a-d6a7-4b05-b15a-fa2661a1a77b" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.711707 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.714152 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.714169 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.714410 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.714585 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9hwl" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.719523 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5"] Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.798553 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d6e830f-730f-43e2-8218-e247e8a663df-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5\" (UID: \"5d6e830f-730f-43e2-8218-e247e8a663df\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.798689 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d6e830f-730f-43e2-8218-e247e8a663df-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5\" (UID: \"5d6e830f-730f-43e2-8218-e247e8a663df\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.798879 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5h89g\" (UniqueName: \"kubernetes.io/projected/5d6e830f-730f-43e2-8218-e247e8a663df-kube-api-access-5h89g\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5\" (UID: \"5d6e830f-730f-43e2-8218-e247e8a663df\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.900811 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d6e830f-730f-43e2-8218-e247e8a663df-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5\" (UID: \"5d6e830f-730f-43e2-8218-e247e8a663df\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.900907 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d6e830f-730f-43e2-8218-e247e8a663df-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5\" (UID: \"5d6e830f-730f-43e2-8218-e247e8a663df\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.900999 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5h89g\" (UniqueName: \"kubernetes.io/projected/5d6e830f-730f-43e2-8218-e247e8a663df-kube-api-access-5h89g\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5\" (UID: 
\"5d6e830f-730f-43e2-8218-e247e8a663df\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.905454 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d6e830f-730f-43e2-8218-e247e8a663df-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5\" (UID: \"5d6e830f-730f-43e2-8218-e247e8a663df\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.905617 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d6e830f-730f-43e2-8218-e247e8a663df-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5\" (UID: \"5d6e830f-730f-43e2-8218-e247e8a663df\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5" Dec 06 06:01:32 crc kubenswrapper[4706]: I1206 06:01:32.915948 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5h89g\" (UniqueName: \"kubernetes.io/projected/5d6e830f-730f-43e2-8218-e247e8a663df-kube-api-access-5h89g\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5\" (UID: \"5d6e830f-730f-43e2-8218-e247e8a663df\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5" Dec 06 06:01:33 crc kubenswrapper[4706]: I1206 06:01:33.026439 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5" Dec 06 06:01:33 crc kubenswrapper[4706]: I1206 06:01:33.558518 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5"] Dec 06 06:01:33 crc kubenswrapper[4706]: W1206 06:01:33.569250 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5d6e830f_730f_43e2_8218_e247e8a663df.slice/crio-adefcc22ec5df95763f198c1219ee327f00c7d395a3951922749c022b327b4d7 WatchSource:0}: Error finding container adefcc22ec5df95763f198c1219ee327f00c7d395a3951922749c022b327b4d7: Status 404 returned error can't find the container with id adefcc22ec5df95763f198c1219ee327f00c7d395a3951922749c022b327b4d7 Dec 06 06:01:33 crc kubenswrapper[4706]: I1206 06:01:33.644527 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5" event={"ID":"5d6e830f-730f-43e2-8218-e247e8a663df","Type":"ContainerStarted","Data":"adefcc22ec5df95763f198c1219ee327f00c7d395a3951922749c022b327b4d7"} Dec 06 06:01:34 crc kubenswrapper[4706]: I1206 06:01:34.652124 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5" event={"ID":"5d6e830f-730f-43e2-8218-e247e8a663df","Type":"ContainerStarted","Data":"58d5de90b7797d6ae4cbadb9bc61364793ceec91533979571db879fab4898c2f"} Dec 06 06:01:34 crc kubenswrapper[4706]: I1206 06:01:34.666893 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5" podStartSLOduration=2.498770612 podStartE2EDuration="2.666871079s" podCreationTimestamp="2025-12-06 06:01:32 +0000 UTC" firstStartedPulling="2025-12-06 06:01:33.571217808 +0000 UTC m=+2515.899041752" lastFinishedPulling="2025-12-06 06:01:33.739318275 +0000 UTC m=+2516.067142219" observedRunningTime="2025-12-06 06:01:34.664075364 +0000 UTC m=+2516.991899318" 
watchObservedRunningTime="2025-12-06 06:01:34.666871079 +0000 UTC m=+2516.994695023" Dec 06 06:01:37 crc kubenswrapper[4706]: I1206 06:01:37.036189 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:01:37 crc kubenswrapper[4706]: E1206 06:01:37.036802 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:01:43 crc kubenswrapper[4706]: I1206 06:01:43.726109 4706 generic.go:334] "Generic (PLEG): container finished" podID="5d6e830f-730f-43e2-8218-e247e8a663df" containerID="58d5de90b7797d6ae4cbadb9bc61364793ceec91533979571db879fab4898c2f" exitCode=0 Dec 06 06:01:43 crc kubenswrapper[4706]: I1206 06:01:43.726192 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5" event={"ID":"5d6e830f-730f-43e2-8218-e247e8a663df","Type":"ContainerDied","Data":"58d5de90b7797d6ae4cbadb9bc61364793ceec91533979571db879fab4898c2f"} Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.122983 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.232142 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5h89g\" (UniqueName: \"kubernetes.io/projected/5d6e830f-730f-43e2-8218-e247e8a663df-kube-api-access-5h89g\") pod \"5d6e830f-730f-43e2-8218-e247e8a663df\" (UID: \"5d6e830f-730f-43e2-8218-e247e8a663df\") " Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.232270 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d6e830f-730f-43e2-8218-e247e8a663df-ssh-key\") pod \"5d6e830f-730f-43e2-8218-e247e8a663df\" (UID: \"5d6e830f-730f-43e2-8218-e247e8a663df\") " Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.232310 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d6e830f-730f-43e2-8218-e247e8a663df-inventory\") pod \"5d6e830f-730f-43e2-8218-e247e8a663df\" (UID: \"5d6e830f-730f-43e2-8218-e247e8a663df\") " Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.239345 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5d6e830f-730f-43e2-8218-e247e8a663df-kube-api-access-5h89g" (OuterVolumeSpecName: "kube-api-access-5h89g") pod "5d6e830f-730f-43e2-8218-e247e8a663df" (UID: "5d6e830f-730f-43e2-8218-e247e8a663df"). InnerVolumeSpecName "kube-api-access-5h89g". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.259243 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d6e830f-730f-43e2-8218-e247e8a663df-inventory" (OuterVolumeSpecName: "inventory") pod "5d6e830f-730f-43e2-8218-e247e8a663df" (UID: "5d6e830f-730f-43e2-8218-e247e8a663df"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.263214 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5d6e830f-730f-43e2-8218-e247e8a663df-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5d6e830f-730f-43e2-8218-e247e8a663df" (UID: "5d6e830f-730f-43e2-8218-e247e8a663df"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.339477 4706 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5d6e830f-730f-43e2-8218-e247e8a663df-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.339522 4706 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5d6e830f-730f-43e2-8218-e247e8a663df-inventory\") on node \"crc\" DevicePath \"\"" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.339536 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5h89g\" (UniqueName: \"kubernetes.io/projected/5d6e830f-730f-43e2-8218-e247e8a663df-kube-api-access-5h89g\") on node \"crc\" DevicePath \"\"" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.745593 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5" event={"ID":"5d6e830f-730f-43e2-8218-e247e8a663df","Type":"ContainerDied","Data":"adefcc22ec5df95763f198c1219ee327f00c7d395a3951922749c022b327b4d7"} Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.745628 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="adefcc22ec5df95763f198c1219ee327f00c7d395a3951922749c022b327b4d7" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.746102 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.836607 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg"] Dec 06 06:01:45 crc kubenswrapper[4706]: E1206 06:01:45.837036 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5d6e830f-730f-43e2-8218-e247e8a663df" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.837087 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="5d6e830f-730f-43e2-8218-e247e8a663df" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.837277 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="5d6e830f-730f-43e2-8218-e247e8a663df" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.837944 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.839885 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.839936 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.840590 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.841228 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.841418 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.841996 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9hwl" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.843006 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.843575 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.849158 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg"] Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.948412 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.948457 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.948478 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.948749 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.948822 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v47h5\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-kube-api-access-v47h5\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.948872 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.948957 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.949078 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.949183 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.949238 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.949297 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: 
\"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.949327 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.949349 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:45 crc kubenswrapper[4706]: I1206 06:01:45.949369 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.051936 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.052032 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.052122 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.052159 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.052196 4706 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.052223 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.052264 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.052303 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.052342 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.052424 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.052458 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v47h5\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-kube-api-access-v47h5\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.052497 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-ssh-key\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.052541 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.052608 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.059900 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.060183 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.060183 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.061078 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.061146 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.061451 4706 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.063073 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.063986 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.064988 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.068189 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.068677 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.069812 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.071571 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.076938 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v47h5\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-kube-api-access-v47h5\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.157498 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.686697 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg"] Dec 06 06:01:46 crc kubenswrapper[4706]: I1206 06:01:46.756279 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" event={"ID":"cc1a17c8-f209-4fb0-9fd5-d17086f90eba","Type":"ContainerStarted","Data":"459d4b492e947dce2227d035ff6124b8bfe6cac935b49206fdf8410cd9816f35"} Dec 06 06:01:47 crc kubenswrapper[4706]: I1206 06:01:47.767221 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" event={"ID":"cc1a17c8-f209-4fb0-9fd5-d17086f90eba","Type":"ContainerStarted","Data":"47f0831a4e25d31a7b23e3c2cc5f9598267d7795e57618eec02c7c417bcfd0f4"} Dec 06 06:01:51 crc kubenswrapper[4706]: I1206 06:01:51.036675 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:01:51 crc kubenswrapper[4706]: E1206 06:01:51.037465 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:02:05 crc kubenswrapper[4706]: I1206 06:02:05.037082 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:02:05 crc kubenswrapper[4706]: E1206 06:02:05.038185 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:02:18 crc kubenswrapper[4706]: I1206 06:02:18.041855 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:02:18 crc kubenswrapper[4706]: E1206 06:02:18.042675 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:02:22 crc kubenswrapper[4706]: I1206 06:02:22.039475 4706 generic.go:334] "Generic (PLEG): container finished" podID="cc1a17c8-f209-4fb0-9fd5-d17086f90eba" containerID="47f0831a4e25d31a7b23e3c2cc5f9598267d7795e57618eec02c7c417bcfd0f4" exitCode=0 Dec 06 06:02:22 crc kubenswrapper[4706]: I1206 06:02:22.047446 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" event={"ID":"cc1a17c8-f209-4fb0-9fd5-d17086f90eba","Type":"ContainerDied","Data":"47f0831a4e25d31a7b23e3c2cc5f9598267d7795e57618eec02c7c417bcfd0f4"} Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.423083 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.529990 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-ssh-key\") pod \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.530270 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-libvirt-combined-ca-bundle\") pod \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.530368 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-nova-combined-ca-bundle\") pod \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.530442 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.530484 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-repo-setup-combined-ca-bundle\") pod \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.530521 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-neutron-metadata-combined-ca-bundle\") pod \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.530563 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47h5\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-kube-api-access-v47h5\") pod \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\" (UID: 
\"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.530591 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-inventory\") pod \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.530662 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-ovn-combined-ca-bundle\") pod \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.530708 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.530729 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-telemetry-combined-ca-bundle\") pod \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.530781 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-bootstrap-combined-ca-bundle\") pod \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.530806 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-ovn-default-certs-0\") pod \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.530822 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\" (UID: \"cc1a17c8-f209-4fb0-9fd5-d17086f90eba\") " Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.538098 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "cc1a17c8-f209-4fb0-9fd5-d17086f90eba" (UID: "cc1a17c8-f209-4fb0-9fd5-d17086f90eba"). InnerVolumeSpecName "telemetry-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.538148 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "cc1a17c8-f209-4fb0-9fd5-d17086f90eba" (UID: "cc1a17c8-f209-4fb0-9fd5-d17086f90eba"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.538212 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "cc1a17c8-f209-4fb0-9fd5-d17086f90eba" (UID: "cc1a17c8-f209-4fb0-9fd5-d17086f90eba"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.538246 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "cc1a17c8-f209-4fb0-9fd5-d17086f90eba" (UID: "cc1a17c8-f209-4fb0-9fd5-d17086f90eba"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.538529 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "cc1a17c8-f209-4fb0-9fd5-d17086f90eba" (UID: "cc1a17c8-f209-4fb0-9fd5-d17086f90eba"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.538934 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "cc1a17c8-f209-4fb0-9fd5-d17086f90eba" (UID: "cc1a17c8-f209-4fb0-9fd5-d17086f90eba"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.539066 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "cc1a17c8-f209-4fb0-9fd5-d17086f90eba" (UID: "cc1a17c8-f209-4fb0-9fd5-d17086f90eba"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.539074 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "cc1a17c8-f209-4fb0-9fd5-d17086f90eba" (UID: "cc1a17c8-f209-4fb0-9fd5-d17086f90eba"). InnerVolumeSpecName "nova-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.539666 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "cc1a17c8-f209-4fb0-9fd5-d17086f90eba" (UID: "cc1a17c8-f209-4fb0-9fd5-d17086f90eba"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.541325 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "cc1a17c8-f209-4fb0-9fd5-d17086f90eba" (UID: "cc1a17c8-f209-4fb0-9fd5-d17086f90eba"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.545323 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-kube-api-access-v47h5" (OuterVolumeSpecName: "kube-api-access-v47h5") pod "cc1a17c8-f209-4fb0-9fd5-d17086f90eba" (UID: "cc1a17c8-f209-4fb0-9fd5-d17086f90eba"). InnerVolumeSpecName "kube-api-access-v47h5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.545417 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "cc1a17c8-f209-4fb0-9fd5-d17086f90eba" (UID: "cc1a17c8-f209-4fb0-9fd5-d17086f90eba"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.565223 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-inventory" (OuterVolumeSpecName: "inventory") pod "cc1a17c8-f209-4fb0-9fd5-d17086f90eba" (UID: "cc1a17c8-f209-4fb0-9fd5-d17086f90eba"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.568126 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "cc1a17c8-f209-4fb0-9fd5-d17086f90eba" (UID: "cc1a17c8-f209-4fb0-9fd5-d17086f90eba"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.634624 4706 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.634671 4706 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.634687 4706 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.634701 4706 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.634715 4706 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.634733 4706 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.634748 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47h5\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-kube-api-access-v47h5\") on node \"crc\" DevicePath \"\"" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.634760 4706 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-inventory\") on node \"crc\" DevicePath \"\"" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.634774 4706 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.634788 4706 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.634800 4706 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.634815 4706 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" 
Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.634831 4706 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Dec 06 06:02:23 crc kubenswrapper[4706]: I1206 06:02:23.634870 4706 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/cc1a17c8-f209-4fb0-9fd5-d17086f90eba-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.060170 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" event={"ID":"cc1a17c8-f209-4fb0-9fd5-d17086f90eba","Type":"ContainerDied","Data":"459d4b492e947dce2227d035ff6124b8bfe6cac935b49206fdf8410cd9816f35"} Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.060204 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.060242 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="459d4b492e947dce2227d035ff6124b8bfe6cac935b49206fdf8410cd9816f35" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.255012 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l"] Dec 06 06:02:24 crc kubenswrapper[4706]: E1206 06:02:24.255510 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cc1a17c8-f209-4fb0-9fd5-d17086f90eba" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.255524 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="cc1a17c8-f209-4fb0-9fd5-d17086f90eba" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.255766 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="cc1a17c8-f209-4fb0-9fd5-d17086f90eba" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.256656 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.259210 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.259457 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9hwl" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.259605 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.259935 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.260094 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.267552 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l"] Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.449722 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24cc16ad-5e43-4d54-bdf8-69d4f319907c-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wb66l\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.450079 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24cc16ad-5e43-4d54-bdf8-69d4f319907c-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wb66l\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.450129 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/24cc16ad-5e43-4d54-bdf8-69d4f319907c-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wb66l\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.450382 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/24cc16ad-5e43-4d54-bdf8-69d4f319907c-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wb66l\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.450415 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qm9tm\" (UniqueName: \"kubernetes.io/projected/24cc16ad-5e43-4d54-bdf8-69d4f319907c-kube-api-access-qm9tm\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wb66l\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.551651 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/24cc16ad-5e43-4d54-bdf8-69d4f319907c-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wb66l\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.551695 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/24cc16ad-5e43-4d54-bdf8-69d4f319907c-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wb66l\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.551798 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/24cc16ad-5e43-4d54-bdf8-69d4f319907c-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wb66l\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.551817 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qm9tm\" (UniqueName: \"kubernetes.io/projected/24cc16ad-5e43-4d54-bdf8-69d4f319907c-kube-api-access-qm9tm\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wb66l\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.551850 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24cc16ad-5e43-4d54-bdf8-69d4f319907c-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wb66l\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.554915 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/24cc16ad-5e43-4d54-bdf8-69d4f319907c-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wb66l\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.557595 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/24cc16ad-5e43-4d54-bdf8-69d4f319907c-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wb66l\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.557857 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24cc16ad-5e43-4d54-bdf8-69d4f319907c-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wb66l\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.563332 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24cc16ad-5e43-4d54-bdf8-69d4f319907c-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wb66l\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.573824 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qm9tm\" (UniqueName: \"kubernetes.io/projected/24cc16ad-5e43-4d54-bdf8-69d4f319907c-kube-api-access-qm9tm\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-wb66l\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" Dec 06 06:02:24 crc kubenswrapper[4706]: I1206 06:02:24.584380 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" Dec 06 06:02:25 crc kubenswrapper[4706]: I1206 06:02:25.122374 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l"] Dec 06 06:02:26 crc kubenswrapper[4706]: I1206 06:02:26.079256 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" event={"ID":"24cc16ad-5e43-4d54-bdf8-69d4f319907c","Type":"ContainerStarted","Data":"6c3016912b97470470d5196e15a26aec663e88096a9ff1d6e96064680eb90d16"} Dec 06 06:02:26 crc kubenswrapper[4706]: I1206 06:02:26.079648 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" event={"ID":"24cc16ad-5e43-4d54-bdf8-69d4f319907c","Type":"ContainerStarted","Data":"698b910f1c46d1800276f603f7e0f8291187a4e91d0d250f55f08f229506e815"} Dec 06 06:02:26 crc kubenswrapper[4706]: I1206 06:02:26.105973 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" podStartSLOduration=1.883932798 podStartE2EDuration="2.105955314s" podCreationTimestamp="2025-12-06 06:02:24 +0000 UTC" firstStartedPulling="2025-12-06 06:02:25.137543815 +0000 UTC m=+2567.465367759" lastFinishedPulling="2025-12-06 06:02:25.359566331 +0000 UTC m=+2567.687390275" observedRunningTime="2025-12-06 06:02:26.095415539 +0000 UTC m=+2568.423239493" watchObservedRunningTime="2025-12-06 06:02:26.105955314 +0000 UTC m=+2568.433779258" Dec 06 06:02:33 crc kubenswrapper[4706]: I1206 06:02:33.036865 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:02:33 crc kubenswrapper[4706]: E1206 06:02:33.037580 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:02:48 crc kubenswrapper[4706]: I1206 06:02:48.063078 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:02:48 crc kubenswrapper[4706]: E1206 06:02:48.069693 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:03:03 crc 
kubenswrapper[4706]: I1206 06:03:03.036599 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:03:03 crc kubenswrapper[4706]: E1206 06:03:03.037642 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:03:18 crc kubenswrapper[4706]: I1206 06:03:18.042849 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:03:18 crc kubenswrapper[4706]: E1206 06:03:18.043974 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:03:28 crc kubenswrapper[4706]: I1206 06:03:28.980914 4706 generic.go:334] "Generic (PLEG): container finished" podID="24cc16ad-5e43-4d54-bdf8-69d4f319907c" containerID="6c3016912b97470470d5196e15a26aec663e88096a9ff1d6e96064680eb90d16" exitCode=0 Dec 06 06:03:28 crc kubenswrapper[4706]: I1206 06:03:28.981595 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" event={"ID":"24cc16ad-5e43-4d54-bdf8-69d4f319907c","Type":"ContainerDied","Data":"6c3016912b97470470d5196e15a26aec663e88096a9ff1d6e96064680eb90d16"} Dec 06 06:03:30 crc kubenswrapper[4706]: I1206 06:03:30.437132 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" Dec 06 06:03:30 crc kubenswrapper[4706]: I1206 06:03:30.580869 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/24cc16ad-5e43-4d54-bdf8-69d4f319907c-ssh-key\") pod \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " Dec 06 06:03:30 crc kubenswrapper[4706]: I1206 06:03:30.581034 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24cc16ad-5e43-4d54-bdf8-69d4f319907c-ovn-combined-ca-bundle\") pod \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " Dec 06 06:03:30 crc kubenswrapper[4706]: I1206 06:03:30.581142 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/24cc16ad-5e43-4d54-bdf8-69d4f319907c-ovncontroller-config-0\") pod \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " Dec 06 06:03:30 crc kubenswrapper[4706]: I1206 06:03:30.581183 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qm9tm\" (UniqueName: \"kubernetes.io/projected/24cc16ad-5e43-4d54-bdf8-69d4f319907c-kube-api-access-qm9tm\") pod \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " Dec 06 06:03:30 crc kubenswrapper[4706]: I1206 06:03:30.581251 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24cc16ad-5e43-4d54-bdf8-69d4f319907c-inventory\") pod \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\" (UID: \"24cc16ad-5e43-4d54-bdf8-69d4f319907c\") " Dec 06 06:03:30 crc kubenswrapper[4706]: I1206 06:03:30.587399 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24cc16ad-5e43-4d54-bdf8-69d4f319907c-kube-api-access-qm9tm" (OuterVolumeSpecName: "kube-api-access-qm9tm") pod "24cc16ad-5e43-4d54-bdf8-69d4f319907c" (UID: "24cc16ad-5e43-4d54-bdf8-69d4f319907c"). InnerVolumeSpecName "kube-api-access-qm9tm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:03:30 crc kubenswrapper[4706]: I1206 06:03:30.587430 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24cc16ad-5e43-4d54-bdf8-69d4f319907c-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "24cc16ad-5e43-4d54-bdf8-69d4f319907c" (UID: "24cc16ad-5e43-4d54-bdf8-69d4f319907c"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:03:30 crc kubenswrapper[4706]: I1206 06:03:30.610869 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24cc16ad-5e43-4d54-bdf8-69d4f319907c-inventory" (OuterVolumeSpecName: "inventory") pod "24cc16ad-5e43-4d54-bdf8-69d4f319907c" (UID: "24cc16ad-5e43-4d54-bdf8-69d4f319907c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:03:30 crc kubenswrapper[4706]: I1206 06:03:30.611237 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24cc16ad-5e43-4d54-bdf8-69d4f319907c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "24cc16ad-5e43-4d54-bdf8-69d4f319907c" (UID: "24cc16ad-5e43-4d54-bdf8-69d4f319907c"). 
InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:03:30 crc kubenswrapper[4706]: I1206 06:03:30.611591 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24cc16ad-5e43-4d54-bdf8-69d4f319907c-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "24cc16ad-5e43-4d54-bdf8-69d4f319907c" (UID: "24cc16ad-5e43-4d54-bdf8-69d4f319907c"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 06:03:30 crc kubenswrapper[4706]: I1206 06:03:30.683957 4706 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24cc16ad-5e43-4d54-bdf8-69d4f319907c-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 06:03:30 crc kubenswrapper[4706]: I1206 06:03:30.683993 4706 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/24cc16ad-5e43-4d54-bdf8-69d4f319907c-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Dec 06 06:03:30 crc kubenswrapper[4706]: I1206 06:03:30.684002 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qm9tm\" (UniqueName: \"kubernetes.io/projected/24cc16ad-5e43-4d54-bdf8-69d4f319907c-kube-api-access-qm9tm\") on node \"crc\" DevicePath \"\"" Dec 06 06:03:30 crc kubenswrapper[4706]: I1206 06:03:30.684012 4706 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/24cc16ad-5e43-4d54-bdf8-69d4f319907c-inventory\") on node \"crc\" DevicePath \"\"" Dec 06 06:03:30 crc kubenswrapper[4706]: I1206 06:03:30.684023 4706 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/24cc16ad-5e43-4d54-bdf8-69d4f319907c-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.004949 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" event={"ID":"24cc16ad-5e43-4d54-bdf8-69d4f319907c","Type":"ContainerDied","Data":"698b910f1c46d1800276f603f7e0f8291187a4e91d0d250f55f08f229506e815"} Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.005322 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="698b910f1c46d1800276f603f7e0f8291187a4e91d0d250f55f08f229506e815" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.005332 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-wb66l" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.095706 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx"] Dec 06 06:03:31 crc kubenswrapper[4706]: E1206 06:03:31.096119 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24cc16ad-5e43-4d54-bdf8-69d4f319907c" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.096137 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="24cc16ad-5e43-4d54-bdf8-69d4f319907c" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.096331 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="24cc16ad-5e43-4d54-bdf8-69d4f319907c" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.096943 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.105894 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.106285 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.106347 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.106698 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.106874 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx"] Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.106885 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9hwl" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.106937 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.200165 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.200248 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgltq\" (UniqueName: \"kubernetes.io/projected/d67f85a9-c64e-42f0-b686-bfb179dccc76-kube-api-access-sgltq\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.200289 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.200319 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.200359 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.200509 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.302479 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.302573 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgltq\" (UniqueName: \"kubernetes.io/projected/d67f85a9-c64e-42f0-b686-bfb179dccc76-kube-api-access-sgltq\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.302610 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.302646 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-neutron-metadata-combined-ca-bundle\") pod 
\"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.302690 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.302763 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.307722 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.308231 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.309197 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.309703 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.316721 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.327222 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-sgltq\" (UniqueName: \"kubernetes.io/projected/d67f85a9-c64e-42f0-b686-bfb179dccc76-kube-api-access-sgltq\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:31 crc kubenswrapper[4706]: I1206 06:03:31.416207 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:03:32 crc kubenswrapper[4706]: I1206 06:03:32.000917 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx"] Dec 06 06:03:32 crc kubenswrapper[4706]: I1206 06:03:32.025966 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" event={"ID":"d67f85a9-c64e-42f0-b686-bfb179dccc76","Type":"ContainerStarted","Data":"4e581c7d151f7eb26170b718a664472e1365523efa6d8742ecd1c78f38d96d66"} Dec 06 06:03:32 crc kubenswrapper[4706]: I1206 06:03:32.035879 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:03:32 crc kubenswrapper[4706]: E1206 06:03:32.036133 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:03:33 crc kubenswrapper[4706]: I1206 06:03:33.035946 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" event={"ID":"d67f85a9-c64e-42f0-b686-bfb179dccc76","Type":"ContainerStarted","Data":"1c0cd3b0d67b2c25b6c210c6b4bb2945814a768bcb771e32fc1b6758f8cdfd38"} Dec 06 06:03:33 crc kubenswrapper[4706]: I1206 06:03:33.065851 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" podStartSLOduration=1.5590148369999999 podStartE2EDuration="2.065827619s" podCreationTimestamp="2025-12-06 06:03:31 +0000 UTC" firstStartedPulling="2025-12-06 06:03:32.009319865 +0000 UTC m=+2634.337143829" lastFinishedPulling="2025-12-06 06:03:32.516132667 +0000 UTC m=+2634.843956611" observedRunningTime="2025-12-06 06:03:33.055786567 +0000 UTC m=+2635.383610511" watchObservedRunningTime="2025-12-06 06:03:33.065827619 +0000 UTC m=+2635.393651563" Dec 06 06:03:47 crc kubenswrapper[4706]: I1206 06:03:47.036627 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:03:47 crc kubenswrapper[4706]: E1206 06:03:47.037870 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:04:01 crc kubenswrapper[4706]: I1206 06:04:01.035697 4706 scope.go:117] "RemoveContainer" 
containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:04:01 crc kubenswrapper[4706]: E1206 06:04:01.036472 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:04:16 crc kubenswrapper[4706]: I1206 06:04:16.036081 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:04:16 crc kubenswrapper[4706]: E1206 06:04:16.036885 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:04:24 crc kubenswrapper[4706]: I1206 06:04:24.501234 4706 generic.go:334] "Generic (PLEG): container finished" podID="d67f85a9-c64e-42f0-b686-bfb179dccc76" containerID="1c0cd3b0d67b2c25b6c210c6b4bb2945814a768bcb771e32fc1b6758f8cdfd38" exitCode=0 Dec 06 06:04:24 crc kubenswrapper[4706]: I1206 06:04:24.501301 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" event={"ID":"d67f85a9-c64e-42f0-b686-bfb179dccc76","Type":"ContainerDied","Data":"1c0cd3b0d67b2c25b6c210c6b4bb2945814a768bcb771e32fc1b6758f8cdfd38"} Dec 06 06:04:25 crc kubenswrapper[4706]: I1206 06:04:25.965537 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.139445 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-neutron-metadata-combined-ca-bundle\") pod \"d67f85a9-c64e-42f0-b686-bfb179dccc76\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.139507 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-nova-metadata-neutron-config-0\") pod \"d67f85a9-c64e-42f0-b686-bfb179dccc76\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.139542 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-ssh-key\") pod \"d67f85a9-c64e-42f0-b686-bfb179dccc76\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.139560 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sgltq\" (UniqueName: \"kubernetes.io/projected/d67f85a9-c64e-42f0-b686-bfb179dccc76-kube-api-access-sgltq\") pod \"d67f85a9-c64e-42f0-b686-bfb179dccc76\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.139645 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-inventory\") pod \"d67f85a9-c64e-42f0-b686-bfb179dccc76\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.139709 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-neutron-ovn-metadata-agent-neutron-config-0\") pod \"d67f85a9-c64e-42f0-b686-bfb179dccc76\" (UID: \"d67f85a9-c64e-42f0-b686-bfb179dccc76\") " Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.145122 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "d67f85a9-c64e-42f0-b686-bfb179dccc76" (UID: "d67f85a9-c64e-42f0-b686-bfb179dccc76"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.147064 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d67f85a9-c64e-42f0-b686-bfb179dccc76-kube-api-access-sgltq" (OuterVolumeSpecName: "kube-api-access-sgltq") pod "d67f85a9-c64e-42f0-b686-bfb179dccc76" (UID: "d67f85a9-c64e-42f0-b686-bfb179dccc76"). InnerVolumeSpecName "kube-api-access-sgltq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.175310 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-inventory" (OuterVolumeSpecName: "inventory") pod "d67f85a9-c64e-42f0-b686-bfb179dccc76" (UID: "d67f85a9-c64e-42f0-b686-bfb179dccc76"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.175654 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d67f85a9-c64e-42f0-b686-bfb179dccc76" (UID: "d67f85a9-c64e-42f0-b686-bfb179dccc76"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.188223 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "d67f85a9-c64e-42f0-b686-bfb179dccc76" (UID: "d67f85a9-c64e-42f0-b686-bfb179dccc76"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.195194 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "d67f85a9-c64e-42f0-b686-bfb179dccc76" (UID: "d67f85a9-c64e-42f0-b686-bfb179dccc76"). InnerVolumeSpecName "nova-metadata-neutron-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.241624 4706 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.241665 4706 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.241682 4706 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.241699 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sgltq\" (UniqueName: \"kubernetes.io/projected/d67f85a9-c64e-42f0-b686-bfb179dccc76-kube-api-access-sgltq\") on node \"crc\" DevicePath \"\"" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.241716 4706 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-inventory\") on node \"crc\" DevicePath \"\"" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.241731 4706 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/d67f85a9-c64e-42f0-b686-bfb179dccc76-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.526089 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" event={"ID":"d67f85a9-c64e-42f0-b686-bfb179dccc76","Type":"ContainerDied","Data":"4e581c7d151f7eb26170b718a664472e1365523efa6d8742ecd1c78f38d96d66"} Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.526141 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e581c7d151f7eb26170b718a664472e1365523efa6d8742ecd1c78f38d96d66" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.526201 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.651265 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb"] Dec 06 06:04:26 crc kubenswrapper[4706]: E1206 06:04:26.651645 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d67f85a9-c64e-42f0-b686-bfb179dccc76" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.651662 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="d67f85a9-c64e-42f0-b686-bfb179dccc76" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.651863 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="d67f85a9-c64e-42f0-b686-bfb179dccc76" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.652491 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.655263 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.658894 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.659105 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.659302 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.659390 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9hwl" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.664064 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb"] Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.852569 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nqmcg\" (UniqueName: \"kubernetes.io/projected/5620e36a-01d5-4282-ad0c-a3e96dc38329-kube-api-access-nqmcg\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.852639 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.853391 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.853463 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.853710 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.955161 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"inventory\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.955241 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.955306 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.955359 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nqmcg\" (UniqueName: \"kubernetes.io/projected/5620e36a-01d5-4282-ad0c-a3e96dc38329-kube-api-access-nqmcg\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.955407 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.958627 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.958864 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.959079 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.959130 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-libvirt-combined-ca-bundle\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" Dec 06 06:04:26 crc kubenswrapper[4706]: I1206 06:04:26.986174 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nqmcg\" (UniqueName: \"kubernetes.io/projected/5620e36a-01d5-4282-ad0c-a3e96dc38329-kube-api-access-nqmcg\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" Dec 06 06:04:27 crc kubenswrapper[4706]: I1206 06:04:27.270332 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" Dec 06 06:04:27 crc kubenswrapper[4706]: I1206 06:04:27.848845 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb"] Dec 06 06:04:27 crc kubenswrapper[4706]: I1206 06:04:27.860181 4706 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 06 06:04:28 crc kubenswrapper[4706]: I1206 06:04:28.548620 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" event={"ID":"5620e36a-01d5-4282-ad0c-a3e96dc38329","Type":"ContainerStarted","Data":"e4c12e1658dd37201a13ce228e8ba1ef84f2fad4a8b9687d0f2ef1d2b9f91c42"} Dec 06 06:04:28 crc kubenswrapper[4706]: I1206 06:04:28.549034 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" event={"ID":"5620e36a-01d5-4282-ad0c-a3e96dc38329","Type":"ContainerStarted","Data":"6858fcb80924ace1b176325ebb386fa192b7ba0e4acf5588fc39c9e9f256f09d"} Dec 06 06:04:28 crc kubenswrapper[4706]: I1206 06:04:28.571463 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" podStartSLOduration=2.351643612 podStartE2EDuration="2.571444899s" podCreationTimestamp="2025-12-06 06:04:26 +0000 UTC" firstStartedPulling="2025-12-06 06:04:27.859948401 +0000 UTC m=+2690.187772345" lastFinishedPulling="2025-12-06 06:04:28.079749688 +0000 UTC m=+2690.407573632" observedRunningTime="2025-12-06 06:04:28.566082154 +0000 UTC m=+2690.893906098" watchObservedRunningTime="2025-12-06 06:04:28.571444899 +0000 UTC m=+2690.899268843" Dec 06 06:04:30 crc kubenswrapper[4706]: I1206 06:04:30.037525 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:04:30 crc kubenswrapper[4706]: E1206 06:04:30.038247 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:04:45 crc kubenswrapper[4706]: I1206 06:04:45.036669 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:04:45 crc kubenswrapper[4706]: E1206 06:04:45.037218 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s 
restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:04:58 crc kubenswrapper[4706]: I1206 06:04:58.044234 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:04:58 crc kubenswrapper[4706]: E1206 06:04:58.045756 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:05:11 crc kubenswrapper[4706]: I1206 06:05:11.037308 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:05:11 crc kubenswrapper[4706]: I1206 06:05:11.942376 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"e2b74aea2b90c903c5ee5b5ee43d5053c167a45bd911f600901cf01edd2fc497"} Dec 06 06:07:09 crc kubenswrapper[4706]: I1206 06:07:09.667687 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-gsmrx"] Dec 06 06:07:09 crc kubenswrapper[4706]: I1206 06:07:09.670707 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gsmrx" Dec 06 06:07:09 crc kubenswrapper[4706]: I1206 06:07:09.677144 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gsmrx"] Dec 06 06:07:09 crc kubenswrapper[4706]: I1206 06:07:09.832027 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lhfkc\" (UniqueName: \"kubernetes.io/projected/3d4f10c6-fcf1-472e-ad86-e9d986c838ba-kube-api-access-lhfkc\") pod \"certified-operators-gsmrx\" (UID: \"3d4f10c6-fcf1-472e-ad86-e9d986c838ba\") " pod="openshift-marketplace/certified-operators-gsmrx" Dec 06 06:07:09 crc kubenswrapper[4706]: I1206 06:07:09.832518 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d4f10c6-fcf1-472e-ad86-e9d986c838ba-utilities\") pod \"certified-operators-gsmrx\" (UID: \"3d4f10c6-fcf1-472e-ad86-e9d986c838ba\") " pod="openshift-marketplace/certified-operators-gsmrx" Dec 06 06:07:09 crc kubenswrapper[4706]: I1206 06:07:09.832664 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d4f10c6-fcf1-472e-ad86-e9d986c838ba-catalog-content\") pod \"certified-operators-gsmrx\" (UID: \"3d4f10c6-fcf1-472e-ad86-e9d986c838ba\") " pod="openshift-marketplace/certified-operators-gsmrx" Dec 06 06:07:09 crc kubenswrapper[4706]: I1206 06:07:09.934770 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d4f10c6-fcf1-472e-ad86-e9d986c838ba-utilities\") pod \"certified-operators-gsmrx\" (UID: 
\"3d4f10c6-fcf1-472e-ad86-e9d986c838ba\") " pod="openshift-marketplace/certified-operators-gsmrx" Dec 06 06:07:09 crc kubenswrapper[4706]: I1206 06:07:09.935241 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d4f10c6-fcf1-472e-ad86-e9d986c838ba-catalog-content\") pod \"certified-operators-gsmrx\" (UID: \"3d4f10c6-fcf1-472e-ad86-e9d986c838ba\") " pod="openshift-marketplace/certified-operators-gsmrx" Dec 06 06:07:09 crc kubenswrapper[4706]: I1206 06:07:09.935414 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d4f10c6-fcf1-472e-ad86-e9d986c838ba-utilities\") pod \"certified-operators-gsmrx\" (UID: \"3d4f10c6-fcf1-472e-ad86-e9d986c838ba\") " pod="openshift-marketplace/certified-operators-gsmrx" Dec 06 06:07:09 crc kubenswrapper[4706]: I1206 06:07:09.935623 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d4f10c6-fcf1-472e-ad86-e9d986c838ba-catalog-content\") pod \"certified-operators-gsmrx\" (UID: \"3d4f10c6-fcf1-472e-ad86-e9d986c838ba\") " pod="openshift-marketplace/certified-operators-gsmrx" Dec 06 06:07:09 crc kubenswrapper[4706]: I1206 06:07:09.935749 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lhfkc\" (UniqueName: \"kubernetes.io/projected/3d4f10c6-fcf1-472e-ad86-e9d986c838ba-kube-api-access-lhfkc\") pod \"certified-operators-gsmrx\" (UID: \"3d4f10c6-fcf1-472e-ad86-e9d986c838ba\") " pod="openshift-marketplace/certified-operators-gsmrx" Dec 06 06:07:09 crc kubenswrapper[4706]: I1206 06:07:09.960708 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lhfkc\" (UniqueName: \"kubernetes.io/projected/3d4f10c6-fcf1-472e-ad86-e9d986c838ba-kube-api-access-lhfkc\") pod \"certified-operators-gsmrx\" (UID: \"3d4f10c6-fcf1-472e-ad86-e9d986c838ba\") " pod="openshift-marketplace/certified-operators-gsmrx" Dec 06 06:07:10 crc kubenswrapper[4706]: I1206 06:07:10.020492 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gsmrx" Dec 06 06:07:10 crc kubenswrapper[4706]: I1206 06:07:10.552161 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gsmrx"] Dec 06 06:07:10 crc kubenswrapper[4706]: I1206 06:07:10.995674 4706 generic.go:334] "Generic (PLEG): container finished" podID="3d4f10c6-fcf1-472e-ad86-e9d986c838ba" containerID="05dc4165bbf1da4e4f98982a12605bb765d10fbb441775e85e0243060c281522" exitCode=0 Dec 06 06:07:10 crc kubenswrapper[4706]: I1206 06:07:10.995770 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gsmrx" event={"ID":"3d4f10c6-fcf1-472e-ad86-e9d986c838ba","Type":"ContainerDied","Data":"05dc4165bbf1da4e4f98982a12605bb765d10fbb441775e85e0243060c281522"} Dec 06 06:07:10 crc kubenswrapper[4706]: I1206 06:07:10.996025 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gsmrx" event={"ID":"3d4f10c6-fcf1-472e-ad86-e9d986c838ba","Type":"ContainerStarted","Data":"edef8d4b2c8371404b14d0b6e8d63b431dc975daa65a403ee2973cf06da6fb22"} Dec 06 06:07:12 crc kubenswrapper[4706]: I1206 06:07:12.006909 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gsmrx" event={"ID":"3d4f10c6-fcf1-472e-ad86-e9d986c838ba","Type":"ContainerStarted","Data":"d7a3434bee5522ba7014907e3279f886cd326a74438519e05b1017a8cc7ee569"} Dec 06 06:07:13 crc kubenswrapper[4706]: I1206 06:07:13.016408 4706 generic.go:334] "Generic (PLEG): container finished" podID="3d4f10c6-fcf1-472e-ad86-e9d986c838ba" containerID="d7a3434bee5522ba7014907e3279f886cd326a74438519e05b1017a8cc7ee569" exitCode=0 Dec 06 06:07:13 crc kubenswrapper[4706]: I1206 06:07:13.016454 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gsmrx" event={"ID":"3d4f10c6-fcf1-472e-ad86-e9d986c838ba","Type":"ContainerDied","Data":"d7a3434bee5522ba7014907e3279f886cd326a74438519e05b1017a8cc7ee569"} Dec 06 06:07:17 crc kubenswrapper[4706]: I1206 06:07:17.053120 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gsmrx" event={"ID":"3d4f10c6-fcf1-472e-ad86-e9d986c838ba","Type":"ContainerStarted","Data":"2ecaafe588b7ffb5fbf2bc2a084858c03f104ec2c5c6a867e95e221fa73a2cf2"} Dec 06 06:07:17 crc kubenswrapper[4706]: I1206 06:07:17.071685 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-gsmrx" podStartSLOduration=3.069808531 podStartE2EDuration="8.071662781s" podCreationTimestamp="2025-12-06 06:07:09 +0000 UTC" firstStartedPulling="2025-12-06 06:07:10.99709972 +0000 UTC m=+2853.324923664" lastFinishedPulling="2025-12-06 06:07:15.99895397 +0000 UTC m=+2858.326777914" observedRunningTime="2025-12-06 06:07:17.069533603 +0000 UTC m=+2859.397357547" watchObservedRunningTime="2025-12-06 06:07:17.071662781 +0000 UTC m=+2859.399486725" Dec 06 06:07:20 crc kubenswrapper[4706]: I1206 06:07:20.021193 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-gsmrx" Dec 06 06:07:20 crc kubenswrapper[4706]: I1206 06:07:20.022125 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-gsmrx" Dec 06 06:07:20 crc kubenswrapper[4706]: I1206 06:07:20.074322 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/certified-operators-gsmrx" Dec 06 06:07:21 crc kubenswrapper[4706]: I1206 06:07:21.136637 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-gsmrx" Dec 06 06:07:21 crc kubenswrapper[4706]: I1206 06:07:21.188911 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gsmrx"] Dec 06 06:07:23 crc kubenswrapper[4706]: I1206 06:07:23.105189 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-gsmrx" podUID="3d4f10c6-fcf1-472e-ad86-e9d986c838ba" containerName="registry-server" containerID="cri-o://2ecaafe588b7ffb5fbf2bc2a084858c03f104ec2c5c6a867e95e221fa73a2cf2" gracePeriod=2 Dec 06 06:07:23 crc kubenswrapper[4706]: I1206 06:07:23.554975 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gsmrx" Dec 06 06:07:23 crc kubenswrapper[4706]: I1206 06:07:23.707009 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lhfkc\" (UniqueName: \"kubernetes.io/projected/3d4f10c6-fcf1-472e-ad86-e9d986c838ba-kube-api-access-lhfkc\") pod \"3d4f10c6-fcf1-472e-ad86-e9d986c838ba\" (UID: \"3d4f10c6-fcf1-472e-ad86-e9d986c838ba\") " Dec 06 06:07:23 crc kubenswrapper[4706]: I1206 06:07:23.707142 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d4f10c6-fcf1-472e-ad86-e9d986c838ba-catalog-content\") pod \"3d4f10c6-fcf1-472e-ad86-e9d986c838ba\" (UID: \"3d4f10c6-fcf1-472e-ad86-e9d986c838ba\") " Dec 06 06:07:23 crc kubenswrapper[4706]: I1206 06:07:23.707193 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d4f10c6-fcf1-472e-ad86-e9d986c838ba-utilities\") pod \"3d4f10c6-fcf1-472e-ad86-e9d986c838ba\" (UID: \"3d4f10c6-fcf1-472e-ad86-e9d986c838ba\") " Dec 06 06:07:23 crc kubenswrapper[4706]: I1206 06:07:23.708305 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d4f10c6-fcf1-472e-ad86-e9d986c838ba-utilities" (OuterVolumeSpecName: "utilities") pod "3d4f10c6-fcf1-472e-ad86-e9d986c838ba" (UID: "3d4f10c6-fcf1-472e-ad86-e9d986c838ba"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:07:23 crc kubenswrapper[4706]: I1206 06:07:23.715318 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d4f10c6-fcf1-472e-ad86-e9d986c838ba-kube-api-access-lhfkc" (OuterVolumeSpecName: "kube-api-access-lhfkc") pod "3d4f10c6-fcf1-472e-ad86-e9d986c838ba" (UID: "3d4f10c6-fcf1-472e-ad86-e9d986c838ba"). InnerVolumeSpecName "kube-api-access-lhfkc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:07:23 crc kubenswrapper[4706]: I1206 06:07:23.764227 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d4f10c6-fcf1-472e-ad86-e9d986c838ba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3d4f10c6-fcf1-472e-ad86-e9d986c838ba" (UID: "3d4f10c6-fcf1-472e-ad86-e9d986c838ba"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:07:23 crc kubenswrapper[4706]: I1206 06:07:23.809797 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lhfkc\" (UniqueName: \"kubernetes.io/projected/3d4f10c6-fcf1-472e-ad86-e9d986c838ba-kube-api-access-lhfkc\") on node \"crc\" DevicePath \"\"" Dec 06 06:07:23 crc kubenswrapper[4706]: I1206 06:07:23.809846 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3d4f10c6-fcf1-472e-ad86-e9d986c838ba-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 06:07:23 crc kubenswrapper[4706]: I1206 06:07:23.809858 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3d4f10c6-fcf1-472e-ad86-e9d986c838ba-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 06:07:24 crc kubenswrapper[4706]: I1206 06:07:24.115036 4706 generic.go:334] "Generic (PLEG): container finished" podID="3d4f10c6-fcf1-472e-ad86-e9d986c838ba" containerID="2ecaafe588b7ffb5fbf2bc2a084858c03f104ec2c5c6a867e95e221fa73a2cf2" exitCode=0 Dec 06 06:07:24 crc kubenswrapper[4706]: I1206 06:07:24.115092 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gsmrx" event={"ID":"3d4f10c6-fcf1-472e-ad86-e9d986c838ba","Type":"ContainerDied","Data":"2ecaafe588b7ffb5fbf2bc2a084858c03f104ec2c5c6a867e95e221fa73a2cf2"} Dec 06 06:07:24 crc kubenswrapper[4706]: I1206 06:07:24.115119 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gsmrx" event={"ID":"3d4f10c6-fcf1-472e-ad86-e9d986c838ba","Type":"ContainerDied","Data":"edef8d4b2c8371404b14d0b6e8d63b431dc975daa65a403ee2973cf06da6fb22"} Dec 06 06:07:24 crc kubenswrapper[4706]: I1206 06:07:24.115137 4706 scope.go:117] "RemoveContainer" containerID="2ecaafe588b7ffb5fbf2bc2a084858c03f104ec2c5c6a867e95e221fa73a2cf2" Dec 06 06:07:24 crc kubenswrapper[4706]: I1206 06:07:24.115194 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gsmrx" Dec 06 06:07:24 crc kubenswrapper[4706]: I1206 06:07:24.137256 4706 scope.go:117] "RemoveContainer" containerID="d7a3434bee5522ba7014907e3279f886cd326a74438519e05b1017a8cc7ee569" Dec 06 06:07:24 crc kubenswrapper[4706]: I1206 06:07:24.142108 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gsmrx"] Dec 06 06:07:24 crc kubenswrapper[4706]: I1206 06:07:24.155797 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-gsmrx"] Dec 06 06:07:24 crc kubenswrapper[4706]: I1206 06:07:24.157635 4706 scope.go:117] "RemoveContainer" containerID="05dc4165bbf1da4e4f98982a12605bb765d10fbb441775e85e0243060c281522" Dec 06 06:07:24 crc kubenswrapper[4706]: I1206 06:07:24.221111 4706 scope.go:117] "RemoveContainer" containerID="2ecaafe588b7ffb5fbf2bc2a084858c03f104ec2c5c6a867e95e221fa73a2cf2" Dec 06 06:07:24 crc kubenswrapper[4706]: E1206 06:07:24.221673 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ecaafe588b7ffb5fbf2bc2a084858c03f104ec2c5c6a867e95e221fa73a2cf2\": container with ID starting with 2ecaafe588b7ffb5fbf2bc2a084858c03f104ec2c5c6a867e95e221fa73a2cf2 not found: ID does not exist" containerID="2ecaafe588b7ffb5fbf2bc2a084858c03f104ec2c5c6a867e95e221fa73a2cf2" Dec 06 06:07:24 crc kubenswrapper[4706]: I1206 06:07:24.221753 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ecaafe588b7ffb5fbf2bc2a084858c03f104ec2c5c6a867e95e221fa73a2cf2"} err="failed to get container status \"2ecaafe588b7ffb5fbf2bc2a084858c03f104ec2c5c6a867e95e221fa73a2cf2\": rpc error: code = NotFound desc = could not find container \"2ecaafe588b7ffb5fbf2bc2a084858c03f104ec2c5c6a867e95e221fa73a2cf2\": container with ID starting with 2ecaafe588b7ffb5fbf2bc2a084858c03f104ec2c5c6a867e95e221fa73a2cf2 not found: ID does not exist" Dec 06 06:07:24 crc kubenswrapper[4706]: I1206 06:07:24.221795 4706 scope.go:117] "RemoveContainer" containerID="d7a3434bee5522ba7014907e3279f886cd326a74438519e05b1017a8cc7ee569" Dec 06 06:07:24 crc kubenswrapper[4706]: E1206 06:07:24.222238 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7a3434bee5522ba7014907e3279f886cd326a74438519e05b1017a8cc7ee569\": container with ID starting with d7a3434bee5522ba7014907e3279f886cd326a74438519e05b1017a8cc7ee569 not found: ID does not exist" containerID="d7a3434bee5522ba7014907e3279f886cd326a74438519e05b1017a8cc7ee569" Dec 06 06:07:24 crc kubenswrapper[4706]: I1206 06:07:24.222285 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7a3434bee5522ba7014907e3279f886cd326a74438519e05b1017a8cc7ee569"} err="failed to get container status \"d7a3434bee5522ba7014907e3279f886cd326a74438519e05b1017a8cc7ee569\": rpc error: code = NotFound desc = could not find container \"d7a3434bee5522ba7014907e3279f886cd326a74438519e05b1017a8cc7ee569\": container with ID starting with d7a3434bee5522ba7014907e3279f886cd326a74438519e05b1017a8cc7ee569 not found: ID does not exist" Dec 06 06:07:24 crc kubenswrapper[4706]: I1206 06:07:24.222318 4706 scope.go:117] "RemoveContainer" containerID="05dc4165bbf1da4e4f98982a12605bb765d10fbb441775e85e0243060c281522" Dec 06 06:07:24 crc kubenswrapper[4706]: E1206 06:07:24.222897 4706 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"05dc4165bbf1da4e4f98982a12605bb765d10fbb441775e85e0243060c281522\": container with ID starting with 05dc4165bbf1da4e4f98982a12605bb765d10fbb441775e85e0243060c281522 not found: ID does not exist" containerID="05dc4165bbf1da4e4f98982a12605bb765d10fbb441775e85e0243060c281522" Dec 06 06:07:24 crc kubenswrapper[4706]: I1206 06:07:24.222926 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05dc4165bbf1da4e4f98982a12605bb765d10fbb441775e85e0243060c281522"} err="failed to get container status \"05dc4165bbf1da4e4f98982a12605bb765d10fbb441775e85e0243060c281522\": rpc error: code = NotFound desc = could not find container \"05dc4165bbf1da4e4f98982a12605bb765d10fbb441775e85e0243060c281522\": container with ID starting with 05dc4165bbf1da4e4f98982a12605bb765d10fbb441775e85e0243060c281522 not found: ID does not exist" Dec 06 06:07:26 crc kubenswrapper[4706]: I1206 06:07:26.047968 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d4f10c6-fcf1-472e-ad86-e9d986c838ba" path="/var/lib/kubelet/pods/3d4f10c6-fcf1-472e-ad86-e9d986c838ba/volumes" Dec 06 06:07:35 crc kubenswrapper[4706]: I1206 06:07:35.961708 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 06:07:35 crc kubenswrapper[4706]: I1206 06:07:35.962277 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 06:08:03 crc kubenswrapper[4706]: I1206 06:08:03.071746 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4krs6"] Dec 06 06:08:03 crc kubenswrapper[4706]: E1206 06:08:03.072751 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d4f10c6-fcf1-472e-ad86-e9d986c838ba" containerName="extract-content" Dec 06 06:08:03 crc kubenswrapper[4706]: I1206 06:08:03.072765 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d4f10c6-fcf1-472e-ad86-e9d986c838ba" containerName="extract-content" Dec 06 06:08:03 crc kubenswrapper[4706]: E1206 06:08:03.072781 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d4f10c6-fcf1-472e-ad86-e9d986c838ba" containerName="extract-utilities" Dec 06 06:08:03 crc kubenswrapper[4706]: I1206 06:08:03.072787 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d4f10c6-fcf1-472e-ad86-e9d986c838ba" containerName="extract-utilities" Dec 06 06:08:03 crc kubenswrapper[4706]: E1206 06:08:03.072810 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d4f10c6-fcf1-472e-ad86-e9d986c838ba" containerName="registry-server" Dec 06 06:08:03 crc kubenswrapper[4706]: I1206 06:08:03.072816 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d4f10c6-fcf1-472e-ad86-e9d986c838ba" containerName="registry-server" Dec 06 06:08:03 crc kubenswrapper[4706]: I1206 06:08:03.072989 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d4f10c6-fcf1-472e-ad86-e9d986c838ba" containerName="registry-server" Dec 06 06:08:03 crc kubenswrapper[4706]: I1206 
06:08:03.074415 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4krs6" Dec 06 06:08:03 crc kubenswrapper[4706]: I1206 06:08:03.111603 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4krs6"] Dec 06 06:08:03 crc kubenswrapper[4706]: I1206 06:08:03.172226 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62f0ca0f-b87f-40d2-9ca3-28677dd9a361-utilities\") pod \"community-operators-4krs6\" (UID: \"62f0ca0f-b87f-40d2-9ca3-28677dd9a361\") " pod="openshift-marketplace/community-operators-4krs6" Dec 06 06:08:03 crc kubenswrapper[4706]: I1206 06:08:03.172651 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62f0ca0f-b87f-40d2-9ca3-28677dd9a361-catalog-content\") pod \"community-operators-4krs6\" (UID: \"62f0ca0f-b87f-40d2-9ca3-28677dd9a361\") " pod="openshift-marketplace/community-operators-4krs6" Dec 06 06:08:03 crc kubenswrapper[4706]: I1206 06:08:03.172700 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bnnmw\" (UniqueName: \"kubernetes.io/projected/62f0ca0f-b87f-40d2-9ca3-28677dd9a361-kube-api-access-bnnmw\") pod \"community-operators-4krs6\" (UID: \"62f0ca0f-b87f-40d2-9ca3-28677dd9a361\") " pod="openshift-marketplace/community-operators-4krs6" Dec 06 06:08:03 crc kubenswrapper[4706]: I1206 06:08:03.274865 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62f0ca0f-b87f-40d2-9ca3-28677dd9a361-utilities\") pod \"community-operators-4krs6\" (UID: \"62f0ca0f-b87f-40d2-9ca3-28677dd9a361\") " pod="openshift-marketplace/community-operators-4krs6" Dec 06 06:08:03 crc kubenswrapper[4706]: I1206 06:08:03.274990 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bnnmw\" (UniqueName: \"kubernetes.io/projected/62f0ca0f-b87f-40d2-9ca3-28677dd9a361-kube-api-access-bnnmw\") pod \"community-operators-4krs6\" (UID: \"62f0ca0f-b87f-40d2-9ca3-28677dd9a361\") " pod="openshift-marketplace/community-operators-4krs6" Dec 06 06:08:03 crc kubenswrapper[4706]: I1206 06:08:03.275007 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62f0ca0f-b87f-40d2-9ca3-28677dd9a361-catalog-content\") pod \"community-operators-4krs6\" (UID: \"62f0ca0f-b87f-40d2-9ca3-28677dd9a361\") " pod="openshift-marketplace/community-operators-4krs6" Dec 06 06:08:03 crc kubenswrapper[4706]: I1206 06:08:03.275468 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62f0ca0f-b87f-40d2-9ca3-28677dd9a361-utilities\") pod \"community-operators-4krs6\" (UID: \"62f0ca0f-b87f-40d2-9ca3-28677dd9a361\") " pod="openshift-marketplace/community-operators-4krs6" Dec 06 06:08:03 crc kubenswrapper[4706]: I1206 06:08:03.275484 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62f0ca0f-b87f-40d2-9ca3-28677dd9a361-catalog-content\") pod \"community-operators-4krs6\" (UID: \"62f0ca0f-b87f-40d2-9ca3-28677dd9a361\") " pod="openshift-marketplace/community-operators-4krs6" Dec 06 06:08:03 crc 
kubenswrapper[4706]: I1206 06:08:03.293912 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bnnmw\" (UniqueName: \"kubernetes.io/projected/62f0ca0f-b87f-40d2-9ca3-28677dd9a361-kube-api-access-bnnmw\") pod \"community-operators-4krs6\" (UID: \"62f0ca0f-b87f-40d2-9ca3-28677dd9a361\") " pod="openshift-marketplace/community-operators-4krs6" Dec 06 06:08:03 crc kubenswrapper[4706]: I1206 06:08:03.395730 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4krs6" Dec 06 06:08:03 crc kubenswrapper[4706]: I1206 06:08:03.922866 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4krs6"] Dec 06 06:08:04 crc kubenswrapper[4706]: I1206 06:08:04.531677 4706 generic.go:334] "Generic (PLEG): container finished" podID="62f0ca0f-b87f-40d2-9ca3-28677dd9a361" containerID="d057e0046a4e15df586c65420090bc7ca99b828791255711713081c0712b37fb" exitCode=0 Dec 06 06:08:04 crc kubenswrapper[4706]: I1206 06:08:04.531861 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4krs6" event={"ID":"62f0ca0f-b87f-40d2-9ca3-28677dd9a361","Type":"ContainerDied","Data":"d057e0046a4e15df586c65420090bc7ca99b828791255711713081c0712b37fb"} Dec 06 06:08:04 crc kubenswrapper[4706]: I1206 06:08:04.531940 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4krs6" event={"ID":"62f0ca0f-b87f-40d2-9ca3-28677dd9a361","Type":"ContainerStarted","Data":"c0406cd91d634a0fdc13f51d2a63659f224a48c9e150891879c8c550d3ca341a"} Dec 06 06:08:05 crc kubenswrapper[4706]: I1206 06:08:05.542704 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4krs6" event={"ID":"62f0ca0f-b87f-40d2-9ca3-28677dd9a361","Type":"ContainerStarted","Data":"172e9bc789361aff790256d968e43fc3ea46549a6df0aa36d0e0ce7c793d5342"} Dec 06 06:08:05 crc kubenswrapper[4706]: I1206 06:08:05.961282 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 06:08:05 crc kubenswrapper[4706]: I1206 06:08:05.961390 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.465085 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-24l26"] Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.467268 4706 util.go:30] "No sandbox for pod can be found. 
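
The ContainerDied/ContainerStarted pairs above trace the OLM catalog pod's startup: the extract-utilities and extract-content steps run as init containers and must each exit 0 before the registry-server container is started. A minimal client-go sketch that reads those init-container exit codes; the kubeconfig path is an assumption, and the pod and namespace names are taken from the log:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: kubeconfig location; adjust for your environment.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	pod, err := cs.CoreV1().Pods("openshift-marketplace").Get(
		context.TODO(), "community-operators-4krs6", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	// Each extract step must terminate with exit code 0 before the kubelet
	// starts registry-server; those transitions are the PLEG events above.
	for _, st := range pod.Status.InitContainerStatuses {
		if t := st.State.Terminated; t != nil {
			fmt.Printf("%s exited with code %d\n", st.Name, t.ExitCode)
		}
	}
}
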
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-24l26" Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.483254 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-24l26"] Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.544174 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c95db3b-6e3d-4d44-b004-335a060129b0-utilities\") pod \"redhat-marketplace-24l26\" (UID: \"4c95db3b-6e3d-4d44-b004-335a060129b0\") " pod="openshift-marketplace/redhat-marketplace-24l26" Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.544309 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqmsv\" (UniqueName: \"kubernetes.io/projected/4c95db3b-6e3d-4d44-b004-335a060129b0-kube-api-access-gqmsv\") pod \"redhat-marketplace-24l26\" (UID: \"4c95db3b-6e3d-4d44-b004-335a060129b0\") " pod="openshift-marketplace/redhat-marketplace-24l26" Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.544336 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c95db3b-6e3d-4d44-b004-335a060129b0-catalog-content\") pod \"redhat-marketplace-24l26\" (UID: \"4c95db3b-6e3d-4d44-b004-335a060129b0\") " pod="openshift-marketplace/redhat-marketplace-24l26" Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.557791 4706 generic.go:334] "Generic (PLEG): container finished" podID="62f0ca0f-b87f-40d2-9ca3-28677dd9a361" containerID="172e9bc789361aff790256d968e43fc3ea46549a6df0aa36d0e0ce7c793d5342" exitCode=0 Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.557851 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4krs6" event={"ID":"62f0ca0f-b87f-40d2-9ca3-28677dd9a361","Type":"ContainerDied","Data":"172e9bc789361aff790256d968e43fc3ea46549a6df0aa36d0e0ce7c793d5342"} Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.646102 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqmsv\" (UniqueName: \"kubernetes.io/projected/4c95db3b-6e3d-4d44-b004-335a060129b0-kube-api-access-gqmsv\") pod \"redhat-marketplace-24l26\" (UID: \"4c95db3b-6e3d-4d44-b004-335a060129b0\") " pod="openshift-marketplace/redhat-marketplace-24l26" Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.646153 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c95db3b-6e3d-4d44-b004-335a060129b0-catalog-content\") pod \"redhat-marketplace-24l26\" (UID: \"4c95db3b-6e3d-4d44-b004-335a060129b0\") " pod="openshift-marketplace/redhat-marketplace-24l26" Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.646245 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c95db3b-6e3d-4d44-b004-335a060129b0-utilities\") pod \"redhat-marketplace-24l26\" (UID: \"4c95db3b-6e3d-4d44-b004-335a060129b0\") " pod="openshift-marketplace/redhat-marketplace-24l26" Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.646684 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c95db3b-6e3d-4d44-b004-335a060129b0-catalog-content\") pod \"redhat-marketplace-24l26\" (UID: 
\"4c95db3b-6e3d-4d44-b004-335a060129b0\") " pod="openshift-marketplace/redhat-marketplace-24l26" Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.646747 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c95db3b-6e3d-4d44-b004-335a060129b0-utilities\") pod \"redhat-marketplace-24l26\" (UID: \"4c95db3b-6e3d-4d44-b004-335a060129b0\") " pod="openshift-marketplace/redhat-marketplace-24l26" Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.668405 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqmsv\" (UniqueName: \"kubernetes.io/projected/4c95db3b-6e3d-4d44-b004-335a060129b0-kube-api-access-gqmsv\") pod \"redhat-marketplace-24l26\" (UID: \"4c95db3b-6e3d-4d44-b004-335a060129b0\") " pod="openshift-marketplace/redhat-marketplace-24l26" Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.792938 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-24l26" Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.880301 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zbjrp"] Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.882192 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zbjrp" Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.907929 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zbjrp"] Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.956038 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee0e69aa-7eaf-4049-b829-c88caca5a09a-utilities\") pod \"redhat-operators-zbjrp\" (UID: \"ee0e69aa-7eaf-4049-b829-c88caca5a09a\") " pod="openshift-marketplace/redhat-operators-zbjrp" Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.956260 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee0e69aa-7eaf-4049-b829-c88caca5a09a-catalog-content\") pod \"redhat-operators-zbjrp\" (UID: \"ee0e69aa-7eaf-4049-b829-c88caca5a09a\") " pod="openshift-marketplace/redhat-operators-zbjrp" Dec 06 06:08:06 crc kubenswrapper[4706]: I1206 06:08:06.956292 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jx2cm\" (UniqueName: \"kubernetes.io/projected/ee0e69aa-7eaf-4049-b829-c88caca5a09a-kube-api-access-jx2cm\") pod \"redhat-operators-zbjrp\" (UID: \"ee0e69aa-7eaf-4049-b829-c88caca5a09a\") " pod="openshift-marketplace/redhat-operators-zbjrp" Dec 06 06:08:07 crc kubenswrapper[4706]: I1206 06:08:07.071035 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee0e69aa-7eaf-4049-b829-c88caca5a09a-catalog-content\") pod \"redhat-operators-zbjrp\" (UID: \"ee0e69aa-7eaf-4049-b829-c88caca5a09a\") " pod="openshift-marketplace/redhat-operators-zbjrp" Dec 06 06:08:07 crc kubenswrapper[4706]: I1206 06:08:07.071455 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jx2cm\" (UniqueName: \"kubernetes.io/projected/ee0e69aa-7eaf-4049-b829-c88caca5a09a-kube-api-access-jx2cm\") pod \"redhat-operators-zbjrp\" (UID: 
\"ee0e69aa-7eaf-4049-b829-c88caca5a09a\") " pod="openshift-marketplace/redhat-operators-zbjrp" Dec 06 06:08:07 crc kubenswrapper[4706]: I1206 06:08:07.071525 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee0e69aa-7eaf-4049-b829-c88caca5a09a-utilities\") pod \"redhat-operators-zbjrp\" (UID: \"ee0e69aa-7eaf-4049-b829-c88caca5a09a\") " pod="openshift-marketplace/redhat-operators-zbjrp" Dec 06 06:08:07 crc kubenswrapper[4706]: I1206 06:08:07.072282 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee0e69aa-7eaf-4049-b829-c88caca5a09a-utilities\") pod \"redhat-operators-zbjrp\" (UID: \"ee0e69aa-7eaf-4049-b829-c88caca5a09a\") " pod="openshift-marketplace/redhat-operators-zbjrp" Dec 06 06:08:07 crc kubenswrapper[4706]: I1206 06:08:07.072568 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee0e69aa-7eaf-4049-b829-c88caca5a09a-catalog-content\") pod \"redhat-operators-zbjrp\" (UID: \"ee0e69aa-7eaf-4049-b829-c88caca5a09a\") " pod="openshift-marketplace/redhat-operators-zbjrp" Dec 06 06:08:07 crc kubenswrapper[4706]: I1206 06:08:07.109815 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jx2cm\" (UniqueName: \"kubernetes.io/projected/ee0e69aa-7eaf-4049-b829-c88caca5a09a-kube-api-access-jx2cm\") pod \"redhat-operators-zbjrp\" (UID: \"ee0e69aa-7eaf-4049-b829-c88caca5a09a\") " pod="openshift-marketplace/redhat-operators-zbjrp" Dec 06 06:08:07 crc kubenswrapper[4706]: I1206 06:08:07.263682 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zbjrp" Dec 06 06:08:07 crc kubenswrapper[4706]: I1206 06:08:07.329792 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-24l26"] Dec 06 06:08:07 crc kubenswrapper[4706]: W1206 06:08:07.335304 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4c95db3b_6e3d_4d44_b004_335a060129b0.slice/crio-65b7771560baa6eb7b4bae8f6f319192d19b6a1e23703b6465727fefec449bdd WatchSource:0}: Error finding container 65b7771560baa6eb7b4bae8f6f319192d19b6a1e23703b6465727fefec449bdd: Status 404 returned error can't find the container with id 65b7771560baa6eb7b4bae8f6f319192d19b6a1e23703b6465727fefec449bdd Dec 06 06:08:07 crc kubenswrapper[4706]: I1206 06:08:07.570130 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24l26" event={"ID":"4c95db3b-6e3d-4d44-b004-335a060129b0","Type":"ContainerStarted","Data":"65b7771560baa6eb7b4bae8f6f319192d19b6a1e23703b6465727fefec449bdd"} Dec 06 06:08:07 crc kubenswrapper[4706]: I1206 06:08:07.739847 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zbjrp"] Dec 06 06:08:08 crc kubenswrapper[4706]: I1206 06:08:08.579550 4706 generic.go:334] "Generic (PLEG): container finished" podID="ee0e69aa-7eaf-4049-b829-c88caca5a09a" containerID="76b43a66519e4a0661606865e99c7720a2f1651138488cc1ceaef8d07139db63" exitCode=0 Dec 06 06:08:08 crc kubenswrapper[4706]: I1206 06:08:08.579892 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zbjrp" 
event={"ID":"ee0e69aa-7eaf-4049-b829-c88caca5a09a","Type":"ContainerDied","Data":"76b43a66519e4a0661606865e99c7720a2f1651138488cc1ceaef8d07139db63"} Dec 06 06:08:08 crc kubenswrapper[4706]: I1206 06:08:08.579920 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zbjrp" event={"ID":"ee0e69aa-7eaf-4049-b829-c88caca5a09a","Type":"ContainerStarted","Data":"135e4e95b85ac79816286fd30e61eec078652c71dc4c113dd5e7a587b04e6cf3"} Dec 06 06:08:08 crc kubenswrapper[4706]: I1206 06:08:08.584483 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4krs6" event={"ID":"62f0ca0f-b87f-40d2-9ca3-28677dd9a361","Type":"ContainerStarted","Data":"02c67d1d551b2d4230db681755ee04db6ad140a9095c2ddffa7ccfd01240bbc5"} Dec 06 06:08:08 crc kubenswrapper[4706]: I1206 06:08:08.587361 4706 generic.go:334] "Generic (PLEG): container finished" podID="4c95db3b-6e3d-4d44-b004-335a060129b0" containerID="324e5a2aaaa0fee548b305f83219c07dcb04d33220199972a66d9fdd1d3679b5" exitCode=0 Dec 06 06:08:08 crc kubenswrapper[4706]: I1206 06:08:08.587392 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24l26" event={"ID":"4c95db3b-6e3d-4d44-b004-335a060129b0","Type":"ContainerDied","Data":"324e5a2aaaa0fee548b305f83219c07dcb04d33220199972a66d9fdd1d3679b5"} Dec 06 06:08:08 crc kubenswrapper[4706]: I1206 06:08:08.639696 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4krs6" podStartSLOduration=3.107341935 podStartE2EDuration="5.639680615s" podCreationTimestamp="2025-12-06 06:08:03 +0000 UTC" firstStartedPulling="2025-12-06 06:08:04.533396242 +0000 UTC m=+2906.861220186" lastFinishedPulling="2025-12-06 06:08:07.065734912 +0000 UTC m=+2909.393558866" observedRunningTime="2025-12-06 06:08:08.636457958 +0000 UTC m=+2910.964281912" watchObservedRunningTime="2025-12-06 06:08:08.639680615 +0000 UTC m=+2910.967504559" Dec 06 06:08:10 crc kubenswrapper[4706]: I1206 06:08:10.606299 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zbjrp" event={"ID":"ee0e69aa-7eaf-4049-b829-c88caca5a09a","Type":"ContainerStarted","Data":"c0e603a26d1e692d1ec944815da8b6dd8ed03f8866f5e4b0a59bee48896f38ad"} Dec 06 06:08:10 crc kubenswrapper[4706]: I1206 06:08:10.609890 4706 generic.go:334] "Generic (PLEG): container finished" podID="4c95db3b-6e3d-4d44-b004-335a060129b0" containerID="b0c2dc8283b92f40cca697fec66b6a71316bf8efd0a716436abbf97bec7a03d5" exitCode=0 Dec 06 06:08:10 crc kubenswrapper[4706]: I1206 06:08:10.609929 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24l26" event={"ID":"4c95db3b-6e3d-4d44-b004-335a060129b0","Type":"ContainerDied","Data":"b0c2dc8283b92f40cca697fec66b6a71316bf8efd0a716436abbf97bec7a03d5"} Dec 06 06:08:12 crc kubenswrapper[4706]: I1206 06:08:12.628851 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24l26" event={"ID":"4c95db3b-6e3d-4d44-b004-335a060129b0","Type":"ContainerStarted","Data":"1e5697569b84891d77ffa53c21cd7a5916a10cbc3a0a149f3ed4ed4cc3b598ac"} Dec 06 06:08:12 crc kubenswrapper[4706]: I1206 06:08:12.654407 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-24l26" podStartSLOduration=3.726015918 podStartE2EDuration="6.654389656s" podCreationTimestamp="2025-12-06 06:08:06 +0000 UTC" 
firstStartedPulling="2025-12-06 06:08:08.589014004 +0000 UTC m=+2910.916837948" lastFinishedPulling="2025-12-06 06:08:11.517387742 +0000 UTC m=+2913.845211686" observedRunningTime="2025-12-06 06:08:12.646203994 +0000 UTC m=+2914.974027938" watchObservedRunningTime="2025-12-06 06:08:12.654389656 +0000 UTC m=+2914.982213600" Dec 06 06:08:13 crc kubenswrapper[4706]: I1206 06:08:13.396255 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4krs6" Dec 06 06:08:13 crc kubenswrapper[4706]: I1206 06:08:13.396577 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4krs6" Dec 06 06:08:13 crc kubenswrapper[4706]: I1206 06:08:13.442006 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4krs6" Dec 06 06:08:13 crc kubenswrapper[4706]: I1206 06:08:13.639259 4706 generic.go:334] "Generic (PLEG): container finished" podID="ee0e69aa-7eaf-4049-b829-c88caca5a09a" containerID="c0e603a26d1e692d1ec944815da8b6dd8ed03f8866f5e4b0a59bee48896f38ad" exitCode=0 Dec 06 06:08:13 crc kubenswrapper[4706]: I1206 06:08:13.639345 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zbjrp" event={"ID":"ee0e69aa-7eaf-4049-b829-c88caca5a09a","Type":"ContainerDied","Data":"c0e603a26d1e692d1ec944815da8b6dd8ed03f8866f5e4b0a59bee48896f38ad"} Dec 06 06:08:13 crc kubenswrapper[4706]: I1206 06:08:13.688458 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4krs6" Dec 06 06:08:16 crc kubenswrapper[4706]: I1206 06:08:16.794066 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-24l26" Dec 06 06:08:16 crc kubenswrapper[4706]: I1206 06:08:16.794656 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-24l26" Dec 06 06:08:16 crc kubenswrapper[4706]: I1206 06:08:16.851632 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-24l26" Dec 06 06:08:16 crc kubenswrapper[4706]: I1206 06:08:16.856258 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4krs6"] Dec 06 06:08:16 crc kubenswrapper[4706]: I1206 06:08:16.856532 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4krs6" podUID="62f0ca0f-b87f-40d2-9ca3-28677dd9a361" containerName="registry-server" containerID="cri-o://02c67d1d551b2d4230db681755ee04db6ad140a9095c2ddffa7ccfd01240bbc5" gracePeriod=2 Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.442466 4706 util.go:48] "No ready sandbox for pod can be found. 
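
The pod_startup_latency_tracker fields above fit together arithmetically: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that end-to-end time minus the image-pull window (lastFinishedPulling minus firstStartedPulling), since image pulling is excluded from the startup SLO. A stdlib Go check using the redhat-marketplace-24l26 timestamps from the log (the kubelet uses monotonic clock readings, which give the same result here):

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s) // Parse accepts fractional seconds even if the layout omits them
		if err != nil {
			panic(err)
		}
		return t
	}

	created := parse("2025-12-06 06:08:06 +0000 UTC")
	firstPull := parse("2025-12-06 06:08:08.589014004 +0000 UTC")
	lastPull := parse("2025-12-06 06:08:11.517387742 +0000 UTC")
	running := parse("2025-12-06 06:08:12.654389656 +0000 UTC")

	e2e := running.Sub(created)          // podStartE2EDuration
	slo := e2e - lastPull.Sub(firstPull) // podStartSLOduration = e2e minus the pull window
	fmt.Println(e2e, slo)
}

This prints 6.654389656s 3.726015918s, matching the logged podStartE2EDuration and podStartSLOduration exactly.
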
Need to start a new one" pod="openshift-marketplace/community-operators-4krs6" Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.502895 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62f0ca0f-b87f-40d2-9ca3-28677dd9a361-utilities\") pod \"62f0ca0f-b87f-40d2-9ca3-28677dd9a361\" (UID: \"62f0ca0f-b87f-40d2-9ca3-28677dd9a361\") " Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.502977 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62f0ca0f-b87f-40d2-9ca3-28677dd9a361-catalog-content\") pod \"62f0ca0f-b87f-40d2-9ca3-28677dd9a361\" (UID: \"62f0ca0f-b87f-40d2-9ca3-28677dd9a361\") " Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.503011 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bnnmw\" (UniqueName: \"kubernetes.io/projected/62f0ca0f-b87f-40d2-9ca3-28677dd9a361-kube-api-access-bnnmw\") pod \"62f0ca0f-b87f-40d2-9ca3-28677dd9a361\" (UID: \"62f0ca0f-b87f-40d2-9ca3-28677dd9a361\") " Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.503669 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62f0ca0f-b87f-40d2-9ca3-28677dd9a361-utilities" (OuterVolumeSpecName: "utilities") pod "62f0ca0f-b87f-40d2-9ca3-28677dd9a361" (UID: "62f0ca0f-b87f-40d2-9ca3-28677dd9a361"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.504110 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/62f0ca0f-b87f-40d2-9ca3-28677dd9a361-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.510344 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/62f0ca0f-b87f-40d2-9ca3-28677dd9a361-kube-api-access-bnnmw" (OuterVolumeSpecName: "kube-api-access-bnnmw") pod "62f0ca0f-b87f-40d2-9ca3-28677dd9a361" (UID: "62f0ca0f-b87f-40d2-9ca3-28677dd9a361"). InnerVolumeSpecName "kube-api-access-bnnmw". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.551534 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/62f0ca0f-b87f-40d2-9ca3-28677dd9a361-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "62f0ca0f-b87f-40d2-9ca3-28677dd9a361" (UID: "62f0ca0f-b87f-40d2-9ca3-28677dd9a361"). InnerVolumeSpecName "catalog-content". 
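
The UnmountVolume.TearDown / "Volume detached" sequence above is the kubelet's volume reconciler unwinding the pod's mounts after deletion. For an emptyDir volume on the default medium there is no real unmount step: teardown removes the per-pod directory, after which the volume is reported detached with an empty DevicePath. A sketch of the paths involved, using the podUID and volume names from the log; the kubernetes.io~empty-dir path segment is an assumption based on the kubelet's usual /var/lib/kubelet layout:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	podUID := "62f0ca0f-b87f-40d2-9ca3-28677dd9a361"
	for _, vol := range []string{"utilities", "catalog-content"} {
		dir := filepath.Join("/var/lib/kubelet/pods", podUID,
			"volumes", "kubernetes.io~empty-dir", vol)
		// A real teardown would os.RemoveAll(dir); this sketch only shows the target.
		fmt.Println("teardown target:", dir)
	}
}
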
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.606435 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/62f0ca0f-b87f-40d2-9ca3-28677dd9a361-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.606470 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bnnmw\" (UniqueName: \"kubernetes.io/projected/62f0ca0f-b87f-40d2-9ca3-28677dd9a361-kube-api-access-bnnmw\") on node \"crc\" DevicePath \"\"" Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.682582 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zbjrp" event={"ID":"ee0e69aa-7eaf-4049-b829-c88caca5a09a","Type":"ContainerStarted","Data":"315d1d9843980595c4d070fe74d474a793c588dd6d8ffad0735e1e3ff487c1f6"} Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.686853 4706 generic.go:334] "Generic (PLEG): container finished" podID="62f0ca0f-b87f-40d2-9ca3-28677dd9a361" containerID="02c67d1d551b2d4230db681755ee04db6ad140a9095c2ddffa7ccfd01240bbc5" exitCode=0 Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.686952 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4krs6" Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.686973 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4krs6" event={"ID":"62f0ca0f-b87f-40d2-9ca3-28677dd9a361","Type":"ContainerDied","Data":"02c67d1d551b2d4230db681755ee04db6ad140a9095c2ddffa7ccfd01240bbc5"} Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.687074 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4krs6" event={"ID":"62f0ca0f-b87f-40d2-9ca3-28677dd9a361","Type":"ContainerDied","Data":"c0406cd91d634a0fdc13f51d2a63659f224a48c9e150891879c8c550d3ca341a"} Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.687106 4706 scope.go:117] "RemoveContainer" containerID="02c67d1d551b2d4230db681755ee04db6ad140a9095c2ddffa7ccfd01240bbc5" Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.710692 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zbjrp" podStartSLOduration=3.607623206 podStartE2EDuration="11.71067388s" podCreationTimestamp="2025-12-06 06:08:06 +0000 UTC" firstStartedPulling="2025-12-06 06:08:08.581735088 +0000 UTC m=+2910.909559032" lastFinishedPulling="2025-12-06 06:08:16.684785762 +0000 UTC m=+2919.012609706" observedRunningTime="2025-12-06 06:08:17.704503884 +0000 UTC m=+2920.032327848" watchObservedRunningTime="2025-12-06 06:08:17.71067388 +0000 UTC m=+2920.038497824" Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.732866 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4krs6"] Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.735965 4706 scope.go:117] "RemoveContainer" containerID="172e9bc789361aff790256d968e43fc3ea46549a6df0aa36d0e0ce7c793d5342" Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.741857 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4krs6"] Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.757350 4706 scope.go:117] "RemoveContainer" containerID="d057e0046a4e15df586c65420090bc7ca99b828791255711713081c0712b37fb" Dec 06 
06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.762396 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-24l26" Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.811695 4706 scope.go:117] "RemoveContainer" containerID="02c67d1d551b2d4230db681755ee04db6ad140a9095c2ddffa7ccfd01240bbc5" Dec 06 06:08:17 crc kubenswrapper[4706]: E1206 06:08:17.812241 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"02c67d1d551b2d4230db681755ee04db6ad140a9095c2ddffa7ccfd01240bbc5\": container with ID starting with 02c67d1d551b2d4230db681755ee04db6ad140a9095c2ddffa7ccfd01240bbc5 not found: ID does not exist" containerID="02c67d1d551b2d4230db681755ee04db6ad140a9095c2ddffa7ccfd01240bbc5" Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.812274 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"02c67d1d551b2d4230db681755ee04db6ad140a9095c2ddffa7ccfd01240bbc5"} err="failed to get container status \"02c67d1d551b2d4230db681755ee04db6ad140a9095c2ddffa7ccfd01240bbc5\": rpc error: code = NotFound desc = could not find container \"02c67d1d551b2d4230db681755ee04db6ad140a9095c2ddffa7ccfd01240bbc5\": container with ID starting with 02c67d1d551b2d4230db681755ee04db6ad140a9095c2ddffa7ccfd01240bbc5 not found: ID does not exist" Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.812298 4706 scope.go:117] "RemoveContainer" containerID="172e9bc789361aff790256d968e43fc3ea46549a6df0aa36d0e0ce7c793d5342" Dec 06 06:08:17 crc kubenswrapper[4706]: E1206 06:08:17.812597 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"172e9bc789361aff790256d968e43fc3ea46549a6df0aa36d0e0ce7c793d5342\": container with ID starting with 172e9bc789361aff790256d968e43fc3ea46549a6df0aa36d0e0ce7c793d5342 not found: ID does not exist" containerID="172e9bc789361aff790256d968e43fc3ea46549a6df0aa36d0e0ce7c793d5342" Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.812616 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"172e9bc789361aff790256d968e43fc3ea46549a6df0aa36d0e0ce7c793d5342"} err="failed to get container status \"172e9bc789361aff790256d968e43fc3ea46549a6df0aa36d0e0ce7c793d5342\": rpc error: code = NotFound desc = could not find container \"172e9bc789361aff790256d968e43fc3ea46549a6df0aa36d0e0ce7c793d5342\": container with ID starting with 172e9bc789361aff790256d968e43fc3ea46549a6df0aa36d0e0ce7c793d5342 not found: ID does not exist" Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.812627 4706 scope.go:117] "RemoveContainer" containerID="d057e0046a4e15df586c65420090bc7ca99b828791255711713081c0712b37fb" Dec 06 06:08:17 crc kubenswrapper[4706]: E1206 06:08:17.812869 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d057e0046a4e15df586c65420090bc7ca99b828791255711713081c0712b37fb\": container with ID starting with d057e0046a4e15df586c65420090bc7ca99b828791255711713081c0712b37fb not found: ID does not exist" containerID="d057e0046a4e15df586c65420090bc7ca99b828791255711713081c0712b37fb" Dec 06 06:08:17 crc kubenswrapper[4706]: I1206 06:08:17.812889 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d057e0046a4e15df586c65420090bc7ca99b828791255711713081c0712b37fb"} err="failed to get 
container status \"d057e0046a4e15df586c65420090bc7ca99b828791255711713081c0712b37fb\": rpc error: code = NotFound desc = could not find container \"d057e0046a4e15df586c65420090bc7ca99b828791255711713081c0712b37fb\": container with ID starting with d057e0046a4e15df586c65420090bc7ca99b828791255711713081c0712b37fb not found: ID does not exist" Dec 06 06:08:18 crc kubenswrapper[4706]: I1206 06:08:18.049721 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="62f0ca0f-b87f-40d2-9ca3-28677dd9a361" path="/var/lib/kubelet/pods/62f0ca0f-b87f-40d2-9ca3-28677dd9a361/volumes" Dec 06 06:08:19 crc kubenswrapper[4706]: I1206 06:08:19.853281 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-24l26"] Dec 06 06:08:19 crc kubenswrapper[4706]: I1206 06:08:19.853733 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-24l26" podUID="4c95db3b-6e3d-4d44-b004-335a060129b0" containerName="registry-server" containerID="cri-o://1e5697569b84891d77ffa53c21cd7a5916a10cbc3a0a149f3ed4ed4cc3b598ac" gracePeriod=2 Dec 06 06:08:20 crc kubenswrapper[4706]: I1206 06:08:20.724645 4706 generic.go:334] "Generic (PLEG): container finished" podID="4c95db3b-6e3d-4d44-b004-335a060129b0" containerID="1e5697569b84891d77ffa53c21cd7a5916a10cbc3a0a149f3ed4ed4cc3b598ac" exitCode=0 Dec 06 06:08:20 crc kubenswrapper[4706]: I1206 06:08:20.724700 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24l26" event={"ID":"4c95db3b-6e3d-4d44-b004-335a060129b0","Type":"ContainerDied","Data":"1e5697569b84891d77ffa53c21cd7a5916a10cbc3a0a149f3ed4ed4cc3b598ac"} Dec 06 06:08:21 crc kubenswrapper[4706]: I1206 06:08:21.180979 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-24l26" Dec 06 06:08:21 crc kubenswrapper[4706]: I1206 06:08:21.268880 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gqmsv\" (UniqueName: \"kubernetes.io/projected/4c95db3b-6e3d-4d44-b004-335a060129b0-kube-api-access-gqmsv\") pod \"4c95db3b-6e3d-4d44-b004-335a060129b0\" (UID: \"4c95db3b-6e3d-4d44-b004-335a060129b0\") " Dec 06 06:08:21 crc kubenswrapper[4706]: I1206 06:08:21.269059 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c95db3b-6e3d-4d44-b004-335a060129b0-utilities\") pod \"4c95db3b-6e3d-4d44-b004-335a060129b0\" (UID: \"4c95db3b-6e3d-4d44-b004-335a060129b0\") " Dec 06 06:08:21 crc kubenswrapper[4706]: I1206 06:08:21.269101 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c95db3b-6e3d-4d44-b004-335a060129b0-catalog-content\") pod \"4c95db3b-6e3d-4d44-b004-335a060129b0\" (UID: \"4c95db3b-6e3d-4d44-b004-335a060129b0\") " Dec 06 06:08:21 crc kubenswrapper[4706]: I1206 06:08:21.274904 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c95db3b-6e3d-4d44-b004-335a060129b0-utilities" (OuterVolumeSpecName: "utilities") pod "4c95db3b-6e3d-4d44-b004-335a060129b0" (UID: "4c95db3b-6e3d-4d44-b004-335a060129b0"). InnerVolumeSpecName "utilities". 
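
The "ContainerStatus from runtime service failed ... NotFound" / "DeleteContainer returned error" pairs above are a benign race: by the time the kubelet re-queries CRI-O for a container it has just removed, the ID no longer exists, and a NotFound answer is treated as already-deleted. A sketch of that idempotent check against the CRI socket, assuming CRI-O's default socket path and reusing a container ID from the log:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/status"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func main() {
	conn, err := grpc.Dial("unix:///var/run/crio/crio.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	rt := runtimeapi.NewRuntimeServiceClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_, err = rt.ContainerStatus(ctx, &runtimeapi.ContainerStatusRequest{
		ContainerId: "02c67d1d551b2d4230db681755ee04db6ad140a9095c2ddffa7ccfd01240bbc5",
	})
	if status.Code(err) == codes.NotFound {
		log.Println("container already removed; nothing to do") // the idempotent path the kubelet takes
	} else if err != nil {
		log.Fatal(err)
	}
}
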
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:08:21 crc kubenswrapper[4706]: I1206 06:08:21.276092 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c95db3b-6e3d-4d44-b004-335a060129b0-kube-api-access-gqmsv" (OuterVolumeSpecName: "kube-api-access-gqmsv") pod "4c95db3b-6e3d-4d44-b004-335a060129b0" (UID: "4c95db3b-6e3d-4d44-b004-335a060129b0"). InnerVolumeSpecName "kube-api-access-gqmsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:08:21 crc kubenswrapper[4706]: I1206 06:08:21.292911 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c95db3b-6e3d-4d44-b004-335a060129b0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4c95db3b-6e3d-4d44-b004-335a060129b0" (UID: "4c95db3b-6e3d-4d44-b004-335a060129b0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:08:21 crc kubenswrapper[4706]: I1206 06:08:21.371255 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c95db3b-6e3d-4d44-b004-335a060129b0-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 06:08:21 crc kubenswrapper[4706]: I1206 06:08:21.371298 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gqmsv\" (UniqueName: \"kubernetes.io/projected/4c95db3b-6e3d-4d44-b004-335a060129b0-kube-api-access-gqmsv\") on node \"crc\" DevicePath \"\"" Dec 06 06:08:21 crc kubenswrapper[4706]: I1206 06:08:21.371309 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c95db3b-6e3d-4d44-b004-335a060129b0-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 06:08:21 crc kubenswrapper[4706]: I1206 06:08:21.735594 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24l26" event={"ID":"4c95db3b-6e3d-4d44-b004-335a060129b0","Type":"ContainerDied","Data":"65b7771560baa6eb7b4bae8f6f319192d19b6a1e23703b6465727fefec449bdd"} Dec 06 06:08:21 crc kubenswrapper[4706]: I1206 06:08:21.735669 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-24l26" Dec 06 06:08:21 crc kubenswrapper[4706]: I1206 06:08:21.735904 4706 scope.go:117] "RemoveContainer" containerID="1e5697569b84891d77ffa53c21cd7a5916a10cbc3a0a149f3ed4ed4cc3b598ac" Dec 06 06:08:21 crc kubenswrapper[4706]: I1206 06:08:21.753498 4706 scope.go:117] "RemoveContainer" containerID="b0c2dc8283b92f40cca697fec66b6a71316bf8efd0a716436abbf97bec7a03d5" Dec 06 06:08:21 crc kubenswrapper[4706]: I1206 06:08:21.774310 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-24l26"] Dec 06 06:08:21 crc kubenswrapper[4706]: I1206 06:08:21.778577 4706 scope.go:117] "RemoveContainer" containerID="324e5a2aaaa0fee548b305f83219c07dcb04d33220199972a66d9fdd1d3679b5" Dec 06 06:08:21 crc kubenswrapper[4706]: I1206 06:08:21.784175 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-24l26"] Dec 06 06:08:22 crc kubenswrapper[4706]: I1206 06:08:22.050560 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c95db3b-6e3d-4d44-b004-335a060129b0" path="/var/lib/kubelet/pods/4c95db3b-6e3d-4d44-b004-335a060129b0/volumes" Dec 06 06:08:27 crc kubenswrapper[4706]: I1206 06:08:27.264070 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zbjrp" Dec 06 06:08:27 crc kubenswrapper[4706]: I1206 06:08:27.264404 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zbjrp" Dec 06 06:08:27 crc kubenswrapper[4706]: I1206 06:08:27.313890 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zbjrp" Dec 06 06:08:27 crc kubenswrapper[4706]: I1206 06:08:27.830679 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zbjrp" Dec 06 06:08:27 crc kubenswrapper[4706]: I1206 06:08:27.877569 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zbjrp"] Dec 06 06:08:29 crc kubenswrapper[4706]: I1206 06:08:29.809683 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zbjrp" podUID="ee0e69aa-7eaf-4049-b829-c88caca5a09a" containerName="registry-server" containerID="cri-o://315d1d9843980595c4d070fe74d474a793c588dd6d8ffad0735e1e3ff487c1f6" gracePeriod=2 Dec 06 06:08:30 crc kubenswrapper[4706]: I1206 06:08:30.821526 4706 generic.go:334] "Generic (PLEG): container finished" podID="ee0e69aa-7eaf-4049-b829-c88caca5a09a" containerID="315d1d9843980595c4d070fe74d474a793c588dd6d8ffad0735e1e3ff487c1f6" exitCode=0 Dec 06 06:08:30 crc kubenswrapper[4706]: I1206 06:08:30.821609 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zbjrp" event={"ID":"ee0e69aa-7eaf-4049-b829-c88caca5a09a","Type":"ContainerDied","Data":"315d1d9843980595c4d070fe74d474a793c588dd6d8ffad0735e1e3ff487c1f6"} Dec 06 06:08:31 crc kubenswrapper[4706]: I1206 06:08:31.770036 4706 util.go:48] "No ready sandbox for pod can be found. 
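
"Killing container with a grace period" above is a SIGTERM-then-SIGKILL escalation: the runtime sends SIGTERM, waits up to gracePeriod seconds (2 for the registry-server containers here, 600 for the machine-config daemon below), then force-kills whatever is left. A stdlib sketch of the same pattern, with a sleep process standing in for the container:

package main

import (
	"log"
	"os/exec"
	"syscall"
	"time"
)

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}

	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	_ = cmd.Process.Signal(syscall.SIGTERM) // polite stop, the runtime's first attempt
	select {
	case <-done:
		log.Println("exited within grace period")
	case <-time.After(2 * time.Second): // gracePeriod=2, as in the log
		_ = cmd.Process.Kill() // SIGKILL escalation
		<-done
		log.Println("killed after grace period expired")
	}
}
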
Need to start a new one" pod="openshift-marketplace/redhat-operators-zbjrp" Dec 06 06:08:31 crc kubenswrapper[4706]: I1206 06:08:31.834144 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zbjrp" event={"ID":"ee0e69aa-7eaf-4049-b829-c88caca5a09a","Type":"ContainerDied","Data":"135e4e95b85ac79816286fd30e61eec078652c71dc4c113dd5e7a587b04e6cf3"} Dec 06 06:08:31 crc kubenswrapper[4706]: I1206 06:08:31.834243 4706 scope.go:117] "RemoveContainer" containerID="315d1d9843980595c4d070fe74d474a793c588dd6d8ffad0735e1e3ff487c1f6" Dec 06 06:08:31 crc kubenswrapper[4706]: I1206 06:08:31.834493 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zbjrp" Dec 06 06:08:31 crc kubenswrapper[4706]: I1206 06:08:31.864152 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jx2cm\" (UniqueName: \"kubernetes.io/projected/ee0e69aa-7eaf-4049-b829-c88caca5a09a-kube-api-access-jx2cm\") pod \"ee0e69aa-7eaf-4049-b829-c88caca5a09a\" (UID: \"ee0e69aa-7eaf-4049-b829-c88caca5a09a\") " Dec 06 06:08:31 crc kubenswrapper[4706]: I1206 06:08:31.864775 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee0e69aa-7eaf-4049-b829-c88caca5a09a-utilities\") pod \"ee0e69aa-7eaf-4049-b829-c88caca5a09a\" (UID: \"ee0e69aa-7eaf-4049-b829-c88caca5a09a\") " Dec 06 06:08:31 crc kubenswrapper[4706]: I1206 06:08:31.864977 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee0e69aa-7eaf-4049-b829-c88caca5a09a-catalog-content\") pod \"ee0e69aa-7eaf-4049-b829-c88caca5a09a\" (UID: \"ee0e69aa-7eaf-4049-b829-c88caca5a09a\") " Dec 06 06:08:31 crc kubenswrapper[4706]: I1206 06:08:31.865628 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee0e69aa-7eaf-4049-b829-c88caca5a09a-utilities" (OuterVolumeSpecName: "utilities") pod "ee0e69aa-7eaf-4049-b829-c88caca5a09a" (UID: "ee0e69aa-7eaf-4049-b829-c88caca5a09a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:08:31 crc kubenswrapper[4706]: I1206 06:08:31.866040 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee0e69aa-7eaf-4049-b829-c88caca5a09a-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 06:08:31 crc kubenswrapper[4706]: I1206 06:08:31.872484 4706 scope.go:117] "RemoveContainer" containerID="c0e603a26d1e692d1ec944815da8b6dd8ed03f8866f5e4b0a59bee48896f38ad" Dec 06 06:08:31 crc kubenswrapper[4706]: I1206 06:08:31.903278 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee0e69aa-7eaf-4049-b829-c88caca5a09a-kube-api-access-jx2cm" (OuterVolumeSpecName: "kube-api-access-jx2cm") pod "ee0e69aa-7eaf-4049-b829-c88caca5a09a" (UID: "ee0e69aa-7eaf-4049-b829-c88caca5a09a"). InnerVolumeSpecName "kube-api-access-jx2cm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:08:31 crc kubenswrapper[4706]: I1206 06:08:31.968323 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jx2cm\" (UniqueName: \"kubernetes.io/projected/ee0e69aa-7eaf-4049-b829-c88caca5a09a-kube-api-access-jx2cm\") on node \"crc\" DevicePath \"\"" Dec 06 06:08:31 crc kubenswrapper[4706]: I1206 06:08:31.978210 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee0e69aa-7eaf-4049-b829-c88caca5a09a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ee0e69aa-7eaf-4049-b829-c88caca5a09a" (UID: "ee0e69aa-7eaf-4049-b829-c88caca5a09a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:08:32 crc kubenswrapper[4706]: I1206 06:08:32.070683 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee0e69aa-7eaf-4049-b829-c88caca5a09a-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 06:08:32 crc kubenswrapper[4706]: I1206 06:08:32.154456 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zbjrp"] Dec 06 06:08:32 crc kubenswrapper[4706]: I1206 06:08:32.161971 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zbjrp"] Dec 06 06:08:32 crc kubenswrapper[4706]: I1206 06:08:32.243473 4706 scope.go:117] "RemoveContainer" containerID="76b43a66519e4a0661606865e99c7720a2f1651138488cc1ceaef8d07139db63" Dec 06 06:08:34 crc kubenswrapper[4706]: I1206 06:08:34.048356 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee0e69aa-7eaf-4049-b829-c88caca5a09a" path="/var/lib/kubelet/pods/ee0e69aa-7eaf-4049-b829-c88caca5a09a/volumes" Dec 06 06:08:35 crc kubenswrapper[4706]: I1206 06:08:35.961365 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 06:08:35 crc kubenswrapper[4706]: I1206 06:08:35.961813 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 06:08:35 crc kubenswrapper[4706]: I1206 06:08:35.961864 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 06:08:35 crc kubenswrapper[4706]: I1206 06:08:35.962467 4706 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e2b74aea2b90c903c5ee5b5ee43d5053c167a45bd911f600901cf01edd2fc497"} pod="openshift-machine-config-operator/machine-config-daemon-z27rn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 06 06:08:35 crc kubenswrapper[4706]: I1206 06:08:35.962542 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" 
containerID="cri-o://e2b74aea2b90c903c5ee5b5ee43d5053c167a45bd911f600901cf01edd2fc497" gracePeriod=600 Dec 06 06:08:36 crc kubenswrapper[4706]: I1206 06:08:36.881344 4706 generic.go:334] "Generic (PLEG): container finished" podID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerID="e2b74aea2b90c903c5ee5b5ee43d5053c167a45bd911f600901cf01edd2fc497" exitCode=0 Dec 06 06:08:36 crc kubenswrapper[4706]: I1206 06:08:36.881419 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerDied","Data":"e2b74aea2b90c903c5ee5b5ee43d5053c167a45bd911f600901cf01edd2fc497"} Dec 06 06:08:36 crc kubenswrapper[4706]: I1206 06:08:36.881617 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc"} Dec 06 06:08:36 crc kubenswrapper[4706]: I1206 06:08:36.881659 4706 scope.go:117] "RemoveContainer" containerID="34820f45b6f7c053f29f15a4fe8ee550cfbf36d430d9054899ede74382acbbeb" Dec 06 06:08:44 crc kubenswrapper[4706]: I1206 06:08:44.948782 4706 generic.go:334] "Generic (PLEG): container finished" podID="5620e36a-01d5-4282-ad0c-a3e96dc38329" containerID="e4c12e1658dd37201a13ce228e8ba1ef84f2fad4a8b9687d0f2ef1d2b9f91c42" exitCode=0 Dec 06 06:08:44 crc kubenswrapper[4706]: I1206 06:08:44.948860 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" event={"ID":"5620e36a-01d5-4282-ad0c-a3e96dc38329","Type":"ContainerDied","Data":"e4c12e1658dd37201a13ce228e8ba1ef84f2fad4a8b9687d0f2ef1d2b9f91c42"} Dec 06 06:08:46 crc kubenswrapper[4706]: I1206 06:08:46.388776 4706 util.go:48] "No ready sandbox for pod can be found. 
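
The machine-config-daemon sequence above shows an HTTP liveness probe failing with "connection refused" until the kubelet finally kills and restarts the container. The probe itself is just a GET against the pod-local endpoint, where any dial error or a status outside 200-399 counts as a failure. A stdlib sketch against the URL from the log; the one-second timeout is an assumption:

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: time.Second} // assumed probe timeout
	resp, err := client.Get("http://127.0.0.1:8798/health")
	if err != nil {
		fmt.Println("probe failure:", err) // e.g. "connect: connection refused"
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		fmt.Println("probe success:", resp.Status)
	} else {
		fmt.Println("probe failure: status", resp.Status)
	}
}
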
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" Dec 06 06:08:46 crc kubenswrapper[4706]: I1206 06:08:46.548023 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-ssh-key\") pod \"5620e36a-01d5-4282-ad0c-a3e96dc38329\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " Dec 06 06:08:46 crc kubenswrapper[4706]: I1206 06:08:46.548142 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nqmcg\" (UniqueName: \"kubernetes.io/projected/5620e36a-01d5-4282-ad0c-a3e96dc38329-kube-api-access-nqmcg\") pod \"5620e36a-01d5-4282-ad0c-a3e96dc38329\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " Dec 06 06:08:46 crc kubenswrapper[4706]: I1206 06:08:46.548174 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-libvirt-combined-ca-bundle\") pod \"5620e36a-01d5-4282-ad0c-a3e96dc38329\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " Dec 06 06:08:46 crc kubenswrapper[4706]: I1206 06:08:46.548375 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-inventory\") pod \"5620e36a-01d5-4282-ad0c-a3e96dc38329\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " Dec 06 06:08:46 crc kubenswrapper[4706]: I1206 06:08:46.548473 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-libvirt-secret-0\") pod \"5620e36a-01d5-4282-ad0c-a3e96dc38329\" (UID: \"5620e36a-01d5-4282-ad0c-a3e96dc38329\") " Dec 06 06:08:46 crc kubenswrapper[4706]: I1206 06:08:46.554978 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "5620e36a-01d5-4282-ad0c-a3e96dc38329" (UID: "5620e36a-01d5-4282-ad0c-a3e96dc38329"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:08:46 crc kubenswrapper[4706]: I1206 06:08:46.555984 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5620e36a-01d5-4282-ad0c-a3e96dc38329-kube-api-access-nqmcg" (OuterVolumeSpecName: "kube-api-access-nqmcg") pod "5620e36a-01d5-4282-ad0c-a3e96dc38329" (UID: "5620e36a-01d5-4282-ad0c-a3e96dc38329"). InnerVolumeSpecName "kube-api-access-nqmcg". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:08:46 crc kubenswrapper[4706]: I1206 06:08:46.588781 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "5620e36a-01d5-4282-ad0c-a3e96dc38329" (UID: "5620e36a-01d5-4282-ad0c-a3e96dc38329"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:08:46 crc kubenswrapper[4706]: I1206 06:08:46.590164 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "5620e36a-01d5-4282-ad0c-a3e96dc38329" (UID: "5620e36a-01d5-4282-ad0c-a3e96dc38329"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:08:46 crc kubenswrapper[4706]: I1206 06:08:46.594688 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-inventory" (OuterVolumeSpecName: "inventory") pod "5620e36a-01d5-4282-ad0c-a3e96dc38329" (UID: "5620e36a-01d5-4282-ad0c-a3e96dc38329"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:08:46 crc kubenswrapper[4706]: I1206 06:08:46.650249 4706 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-inventory\") on node \"crc\" DevicePath \"\"" Dec 06 06:08:46 crc kubenswrapper[4706]: I1206 06:08:46.650310 4706 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Dec 06 06:08:46 crc kubenswrapper[4706]: I1206 06:08:46.650320 4706 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 06 06:08:46 crc kubenswrapper[4706]: I1206 06:08:46.650328 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nqmcg\" (UniqueName: \"kubernetes.io/projected/5620e36a-01d5-4282-ad0c-a3e96dc38329-kube-api-access-nqmcg\") on node \"crc\" DevicePath \"\"" Dec 06 06:08:46 crc kubenswrapper[4706]: I1206 06:08:46.650338 4706 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5620e36a-01d5-4282-ad0c-a3e96dc38329-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 06:08:46 crc kubenswrapper[4706]: I1206 06:08:46.972003 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" event={"ID":"5620e36a-01d5-4282-ad0c-a3e96dc38329","Type":"ContainerDied","Data":"6858fcb80924ace1b176325ebb386fa192b7ba0e4acf5588fc39c9e9f256f09d"} Dec 06 06:08:46 crc kubenswrapper[4706]: I1206 06:08:46.972088 4706 util.go:48] "No ready sandbox for pod can be found. 
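
The libvirt-edpm-deployment pod above is a run-to-completion deployment job: its container exits 0, the kubelet tears down the ssh-key, inventory, and CA-bundle secret mounts, and the next service pod (nova-edpm, below) is scheduled. A minimal client-go sketch for confirming such a pod finished, assuming a reachable kubeconfig; the names are from the log:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config") // assumed path
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	pod, err := cs.CoreV1().Pods("openstack").Get(context.TODO(),
		"libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb", metav1.GetOptions{})
	if err != nil {
		fmt.Println("pod already deleted:", err) // expected once volume cleanup has run
		return
	}
	fmt.Println("phase:", pod.Status.Phase,
		"succeeded:", pod.Status.Phase == corev1.PodSucceeded)
}
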
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb" Dec 06 06:08:46 crc kubenswrapper[4706]: I1206 06:08:46.972105 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6858fcb80924ace1b176325ebb386fa192b7ba0e4acf5588fc39c9e9f256f09d" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.088111 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84"] Dec 06 06:08:47 crc kubenswrapper[4706]: E1206 06:08:47.088592 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5620e36a-01d5-4282-ad0c-a3e96dc38329" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.088627 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="5620e36a-01d5-4282-ad0c-a3e96dc38329" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Dec 06 06:08:47 crc kubenswrapper[4706]: E1206 06:08:47.088654 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee0e69aa-7eaf-4049-b829-c88caca5a09a" containerName="extract-utilities" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.088662 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee0e69aa-7eaf-4049-b829-c88caca5a09a" containerName="extract-utilities" Dec 06 06:08:47 crc kubenswrapper[4706]: E1206 06:08:47.088678 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62f0ca0f-b87f-40d2-9ca3-28677dd9a361" containerName="extract-utilities" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.088685 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="62f0ca0f-b87f-40d2-9ca3-28677dd9a361" containerName="extract-utilities" Dec 06 06:08:47 crc kubenswrapper[4706]: E1206 06:08:47.088696 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62f0ca0f-b87f-40d2-9ca3-28677dd9a361" containerName="extract-content" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.088704 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="62f0ca0f-b87f-40d2-9ca3-28677dd9a361" containerName="extract-content" Dec 06 06:08:47 crc kubenswrapper[4706]: E1206 06:08:47.088718 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c95db3b-6e3d-4d44-b004-335a060129b0" containerName="extract-utilities" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.088726 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c95db3b-6e3d-4d44-b004-335a060129b0" containerName="extract-utilities" Dec 06 06:08:47 crc kubenswrapper[4706]: E1206 06:08:47.088736 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee0e69aa-7eaf-4049-b829-c88caca5a09a" containerName="extract-content" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.088744 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee0e69aa-7eaf-4049-b829-c88caca5a09a" containerName="extract-content" Dec 06 06:08:47 crc kubenswrapper[4706]: E1206 06:08:47.088756 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c95db3b-6e3d-4d44-b004-335a060129b0" containerName="extract-content" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.088763 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c95db3b-6e3d-4d44-b004-335a060129b0" containerName="extract-content" Dec 06 06:08:47 crc kubenswrapper[4706]: E1206 06:08:47.088776 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c95db3b-6e3d-4d44-b004-335a060129b0" containerName="registry-server" Dec 06 
06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.088792 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c95db3b-6e3d-4d44-b004-335a060129b0" containerName="registry-server"
Dec 06 06:08:47 crc kubenswrapper[4706]: E1206 06:08:47.088837 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="62f0ca0f-b87f-40d2-9ca3-28677dd9a361" containerName="registry-server"
Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.088848 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="62f0ca0f-b87f-40d2-9ca3-28677dd9a361" containerName="registry-server"
Dec 06 06:08:47 crc kubenswrapper[4706]: E1206 06:08:47.088862 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee0e69aa-7eaf-4049-b829-c88caca5a09a" containerName="registry-server"
Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.088870 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee0e69aa-7eaf-4049-b829-c88caca5a09a" containerName="registry-server"
Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.089175 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="ee0e69aa-7eaf-4049-b829-c88caca5a09a" containerName="registry-server"
Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.089204 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c95db3b-6e3d-4d44-b004-335a060129b0" containerName="registry-server"
Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.089224 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="62f0ca0f-b87f-40d2-9ca3-28677dd9a361" containerName="registry-server"
Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.089253 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="5620e36a-01d5-4282-ad0c-a3e96dc38329" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.090062 4706 util.go:30] "No sandbox for pod can be found.
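
The RemoveStaleState burst above fires on the next pod admission: the CPU and memory managers drop their in-memory per-container assignments for pods the API server no longer knows about (the three catalog pods and the finished libvirt job). An illustrative sketch of that cleanup; the map layout and CPU-set values are stand-ins, not the kubelet's own state types:

package main

import "fmt"

type containerKey struct{ podUID, container string }

func main() {
	// Hypothetical assignment state; pod UIDs are from the log, the CPU sets are invented.
	assignments := map[containerKey]string{
		{"ee0e69aa-7eaf-4049-b829-c88caca5a09a", "registry-server"}: "0-3",
		{"c4a06494-e4f9-427e-b7e2-dad0c843d44a", "nova-edpm"}:       "0-3",
	}
	live := map[string]bool{"c4a06494-e4f9-427e-b7e2-dad0c843d44a": true} // pods still known to the API

	for k := range assignments {
		if !live[k.podUID] {
			delete(assignments, k) // the stale-state removal the log records
			fmt.Println("deleted CPUSet assignment for", k.podUID, k.container)
		}
	}
}
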
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.098216 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.098460 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.098614 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.098768 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.098929 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9hwl" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.099111 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.099263 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.100457 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84"] Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.261427 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.261514 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.261646 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.261696 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.261772 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.261852 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.261885 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.261907 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnjd2\" (UniqueName: \"kubernetes.io/projected/c4a06494-e4f9-427e-b7e2-dad0c843d44a-kube-api-access-cnjd2\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.261983 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.363237 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.363532 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.363599 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.363643 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.363692 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.363715 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.363739 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnjd2\" (UniqueName: \"kubernetes.io/projected/c4a06494-e4f9-427e-b7e2-dad0c843d44a-kube-api-access-cnjd2\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.363825 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.363879 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.365162 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.369170 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.369707 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-inventory\") pod 
\"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.370024 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.370340 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.370550 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.377690 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.377982 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.383968 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnjd2\" (UniqueName: \"kubernetes.io/projected/c4a06494-e4f9-427e-b7e2-dad0c843d44a-kube-api-access-cnjd2\") pod \"nova-edpm-deployment-openstack-edpm-ipam-kfg84\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.417939 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.923986 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84"] Dec 06 06:08:47 crc kubenswrapper[4706]: I1206 06:08:47.989618 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" event={"ID":"c4a06494-e4f9-427e-b7e2-dad0c843d44a","Type":"ContainerStarted","Data":"673fdf984da273caa7a6656ba8a94aa6257bf2196adec4c2c0855c632df91e5c"} Dec 06 06:08:50 crc kubenswrapper[4706]: I1206 06:08:50.010110 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" event={"ID":"c4a06494-e4f9-427e-b7e2-dad0c843d44a","Type":"ContainerStarted","Data":"c51b7e0d8a3640203ba49edc67a91b1818fcf175156a73b69423607dc662d9f7"} Dec 06 06:08:50 crc kubenswrapper[4706]: I1206 06:08:50.038507 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" podStartSLOduration=2.398462693 podStartE2EDuration="3.038485274s" podCreationTimestamp="2025-12-06 06:08:47 +0000 UTC" firstStartedPulling="2025-12-06 06:08:47.930910878 +0000 UTC m=+2950.258734822" lastFinishedPulling="2025-12-06 06:08:48.570933459 +0000 UTC m=+2950.898757403" observedRunningTime="2025-12-06 06:08:50.030455517 +0000 UTC m=+2952.358279471" watchObservedRunningTime="2025-12-06 06:08:50.038485274 +0000 UTC m=+2952.366309228" Dec 06 06:11:05 crc kubenswrapper[4706]: I1206 06:11:05.960976 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 06:11:05 crc kubenswrapper[4706]: I1206 06:11:05.961491 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 06:11:35 crc kubenswrapper[4706]: I1206 06:11:35.961479 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 06:11:35 crc kubenswrapper[4706]: I1206 06:11:35.962641 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 06:11:43 crc kubenswrapper[4706]: I1206 06:11:43.188171 4706 generic.go:334] "Generic (PLEG): container finished" podID="c4a06494-e4f9-427e-b7e2-dad0c843d44a" containerID="c51b7e0d8a3640203ba49edc67a91b1818fcf175156a73b69423607dc662d9f7" exitCode=0 Dec 06 06:11:43 crc kubenswrapper[4706]: I1206 06:11:43.188243 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" 
event={"ID":"c4a06494-e4f9-427e-b7e2-dad0c843d44a","Type":"ContainerDied","Data":"c51b7e0d8a3640203ba49edc67a91b1818fcf175156a73b69423607dc662d9f7"} Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.605373 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.642204 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-inventory\") pod \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.642445 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cnjd2\" (UniqueName: \"kubernetes.io/projected/c4a06494-e4f9-427e-b7e2-dad0c843d44a-kube-api-access-cnjd2\") pod \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.642579 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-extra-config-0\") pod \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.642690 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-ssh-key\") pod \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.642826 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-cell1-compute-config-1\") pod \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.643205 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-migration-ssh-key-1\") pod \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.643367 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-migration-ssh-key-0\") pod \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.643515 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-combined-ca-bundle\") pod \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.643649 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-cell1-compute-config-0\") pod 
\"c4a06494-e4f9-427e-b7e2-dad0c843d44a\" (UID: \"c4a06494-e4f9-427e-b7e2-dad0c843d44a\") " Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.648302 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "c4a06494-e4f9-427e-b7e2-dad0c843d44a" (UID: "c4a06494-e4f9-427e-b7e2-dad0c843d44a"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.649229 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4a06494-e4f9-427e-b7e2-dad0c843d44a-kube-api-access-cnjd2" (OuterVolumeSpecName: "kube-api-access-cnjd2") pod "c4a06494-e4f9-427e-b7e2-dad0c843d44a" (UID: "c4a06494-e4f9-427e-b7e2-dad0c843d44a"). InnerVolumeSpecName "kube-api-access-cnjd2". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.677305 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "c4a06494-e4f9-427e-b7e2-dad0c843d44a" (UID: "c4a06494-e4f9-427e-b7e2-dad0c843d44a"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.677337 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-inventory" (OuterVolumeSpecName: "inventory") pod "c4a06494-e4f9-427e-b7e2-dad0c843d44a" (UID: "c4a06494-e4f9-427e-b7e2-dad0c843d44a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.677838 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "c4a06494-e4f9-427e-b7e2-dad0c843d44a" (UID: "c4a06494-e4f9-427e-b7e2-dad0c843d44a"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.679884 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c4a06494-e4f9-427e-b7e2-dad0c843d44a" (UID: "c4a06494-e4f9-427e-b7e2-dad0c843d44a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.683403 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "c4a06494-e4f9-427e-b7e2-dad0c843d44a" (UID: "c4a06494-e4f9-427e-b7e2-dad0c843d44a"). InnerVolumeSpecName "nova-migration-ssh-key-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.693978 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "c4a06494-e4f9-427e-b7e2-dad0c843d44a" (UID: "c4a06494-e4f9-427e-b7e2-dad0c843d44a"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.706467 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "c4a06494-e4f9-427e-b7e2-dad0c843d44a" (UID: "c4a06494-e4f9-427e-b7e2-dad0c843d44a"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.745732 4706 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.745763 4706 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.745772 4706 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.745782 4706 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-inventory\") on node \"crc\" DevicePath \"\"" Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.745790 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cnjd2\" (UniqueName: \"kubernetes.io/projected/c4a06494-e4f9-427e-b7e2-dad0c843d44a-kube-api-access-cnjd2\") on node \"crc\" DevicePath \"\"" Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.745799 4706 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.745808 4706 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.745816 4706 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Dec 06 06:11:44 crc kubenswrapper[4706]: I1206 06:11:44.745825 4706 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/c4a06494-e4f9-427e-b7e2-dad0c843d44a-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 
06:11:45.205377 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" event={"ID":"c4a06494-e4f9-427e-b7e2-dad0c843d44a","Type":"ContainerDied","Data":"673fdf984da273caa7a6656ba8a94aa6257bf2196adec4c2c0855c632df91e5c"} Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.205657 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="673fdf984da273caa7a6656ba8a94aa6257bf2196adec4c2c0855c632df91e5c" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.205446 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-kfg84" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.312042 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb"] Dec 06 06:11:45 crc kubenswrapper[4706]: E1206 06:11:45.312874 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4a06494-e4f9-427e-b7e2-dad0c843d44a" containerName="nova-edpm-deployment-openstack-edpm-ipam" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.312894 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4a06494-e4f9-427e-b7e2-dad0c843d44a" containerName="nova-edpm-deployment-openstack-edpm-ipam" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.313449 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4a06494-e4f9-427e-b7e2-dad0c843d44a" containerName="nova-edpm-deployment-openstack-edpm-ipam" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.325422 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.327998 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.329336 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-c9hwl" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.329591 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.329614 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.330330 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.341629 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb"] Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.356654 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.356930 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mp84\" (UniqueName: 
\"kubernetes.io/projected/19fbc54f-2695-4d41-9221-c5d2731510c1-kube-api-access-7mp84\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.357456 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.357723 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.357849 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.358023 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.358276 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.459587 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.459936 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc 
kubenswrapper[4706]: I1206 06:11:45.459968 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.460013 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.460069 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.460110 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.460126 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mp84\" (UniqueName: \"kubernetes.io/projected/19fbc54f-2695-4d41-9221-c5d2731510c1-kube-api-access-7mp84\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.464258 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.464354 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.464554 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " 
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.464642 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.464661 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.466134 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.477270 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mp84\" (UniqueName: \"kubernetes.io/projected/19fbc54f-2695-4d41-9221-c5d2731510c1-kube-api-access-7mp84\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:45 crc kubenswrapper[4706]: I1206 06:11:45.661549 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:11:46 crc kubenswrapper[4706]: I1206 06:11:46.248108 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb"] Dec 06 06:11:46 crc kubenswrapper[4706]: I1206 06:11:46.265112 4706 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 06 06:11:47 crc kubenswrapper[4706]: I1206 06:11:47.223017 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" event={"ID":"19fbc54f-2695-4d41-9221-c5d2731510c1","Type":"ContainerStarted","Data":"8c37f1b6efd168fcdb6d0ff0b80c8941d860975425730f8d718004631457c140"} Dec 06 06:11:47 crc kubenswrapper[4706]: I1206 06:11:47.223470 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" event={"ID":"19fbc54f-2695-4d41-9221-c5d2731510c1","Type":"ContainerStarted","Data":"4d6767f6c4e5867cdb6dddd9bb919266f1e841d89a98daccbba774b1a655c0e7"} Dec 06 06:11:47 crc kubenswrapper[4706]: I1206 06:11:47.239576 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" podStartSLOduration=1.9814582170000001 podStartE2EDuration="2.239557788s" podCreationTimestamp="2025-12-06 06:11:45 +0000 UTC" firstStartedPulling="2025-12-06 06:11:46.264861704 +0000 UTC m=+3128.592685648" lastFinishedPulling="2025-12-06 06:11:46.522961275 +0000 UTC m=+3128.850785219" observedRunningTime="2025-12-06 06:11:47.238971792 +0000 UTC m=+3129.566795756" watchObservedRunningTime="2025-12-06 06:11:47.239557788 +0000 UTC m=+3129.567381732" Dec 06 06:12:05 crc kubenswrapper[4706]: I1206 06:12:05.961566 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 06:12:05 crc kubenswrapper[4706]: I1206 06:12:05.962119 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 06:12:05 crc kubenswrapper[4706]: I1206 06:12:05.962160 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 06:12:05 crc kubenswrapper[4706]: I1206 06:12:05.962726 4706 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc"} pod="openshift-machine-config-operator/machine-config-daemon-z27rn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 06 06:12:05 crc kubenswrapper[4706]: I1206 06:12:05.962772 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" containerID="cri-o://9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" 
gracePeriod=600 Dec 06 06:12:06 crc kubenswrapper[4706]: E1206 06:12:06.081861 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:12:06 crc kubenswrapper[4706]: I1206 06:12:06.386726 4706 generic.go:334] "Generic (PLEG): container finished" podID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" exitCode=0 Dec 06 06:12:06 crc kubenswrapper[4706]: I1206 06:12:06.386814 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerDied","Data":"9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc"} Dec 06 06:12:06 crc kubenswrapper[4706]: I1206 06:12:06.386892 4706 scope.go:117] "RemoveContainer" containerID="e2b74aea2b90c903c5ee5b5ee43d5053c167a45bd911f600901cf01edd2fc497" Dec 06 06:12:06 crc kubenswrapper[4706]: I1206 06:12:06.387868 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:12:06 crc kubenswrapper[4706]: E1206 06:12:06.388466 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:12:18 crc kubenswrapper[4706]: I1206 06:12:18.045884 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:12:18 crc kubenswrapper[4706]: E1206 06:12:18.046669 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:12:29 crc kubenswrapper[4706]: I1206 06:12:29.036212 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:12:29 crc kubenswrapper[4706]: E1206 06:12:29.036959 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:12:41 crc kubenswrapper[4706]: I1206 06:12:41.037106 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:12:41 crc kubenswrapper[4706]: E1206 06:12:41.038442 4706 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:12:54 crc kubenswrapper[4706]: I1206 06:12:54.036607 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:12:54 crc kubenswrapper[4706]: E1206 06:12:54.037537 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:13:05 crc kubenswrapper[4706]: I1206 06:13:05.035988 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:13:05 crc kubenswrapper[4706]: E1206 06:13:05.036715 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:13:18 crc kubenswrapper[4706]: I1206 06:13:18.036348 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:13:18 crc kubenswrapper[4706]: E1206 06:13:18.037285 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:13:31 crc kubenswrapper[4706]: I1206 06:13:31.035829 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:13:31 crc kubenswrapper[4706]: E1206 06:13:31.036618 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:13:43 crc kubenswrapper[4706]: I1206 06:13:43.036235 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:13:43 crc kubenswrapper[4706]: E1206 06:13:43.038315 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:13:58 crc kubenswrapper[4706]: I1206 06:13:58.045023 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:13:58 crc kubenswrapper[4706]: E1206 06:13:58.045784 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:14:09 crc kubenswrapper[4706]: I1206 06:14:09.036212 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:14:09 crc kubenswrapper[4706]: E1206 06:14:09.038087 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:14:18 crc kubenswrapper[4706]: I1206 06:14:18.598878 4706 generic.go:334] "Generic (PLEG): container finished" podID="19fbc54f-2695-4d41-9221-c5d2731510c1" containerID="8c37f1b6efd168fcdb6d0ff0b80c8941d860975425730f8d718004631457c140" exitCode=0 Dec 06 06:14:18 crc kubenswrapper[4706]: I1206 06:14:18.598970 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" event={"ID":"19fbc54f-2695-4d41-9221-c5d2731510c1","Type":"ContainerDied","Data":"8c37f1b6efd168fcdb6d0ff0b80c8941d860975425730f8d718004631457c140"} Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.032316 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.110490 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ceilometer-compute-config-data-2\") pod \"19fbc54f-2695-4d41-9221-c5d2731510c1\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.110528 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ceilometer-compute-config-data-0\") pod \"19fbc54f-2695-4d41-9221-c5d2731510c1\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.110652 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-inventory\") pod \"19fbc54f-2695-4d41-9221-c5d2731510c1\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.110689 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7mp84\" (UniqueName: \"kubernetes.io/projected/19fbc54f-2695-4d41-9221-c5d2731510c1-kube-api-access-7mp84\") pod \"19fbc54f-2695-4d41-9221-c5d2731510c1\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.110718 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ceilometer-compute-config-data-1\") pod \"19fbc54f-2695-4d41-9221-c5d2731510c1\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.110752 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-telemetry-combined-ca-bundle\") pod \"19fbc54f-2695-4d41-9221-c5d2731510c1\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.110776 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ssh-key\") pod \"19fbc54f-2695-4d41-9221-c5d2731510c1\" (UID: \"19fbc54f-2695-4d41-9221-c5d2731510c1\") " Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.124063 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19fbc54f-2695-4d41-9221-c5d2731510c1-kube-api-access-7mp84" (OuterVolumeSpecName: "kube-api-access-7mp84") pod "19fbc54f-2695-4d41-9221-c5d2731510c1" (UID: "19fbc54f-2695-4d41-9221-c5d2731510c1"). InnerVolumeSpecName "kube-api-access-7mp84". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.124557 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "19fbc54f-2695-4d41-9221-c5d2731510c1" (UID: "19fbc54f-2695-4d41-9221-c5d2731510c1"). 
InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.142457 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "19fbc54f-2695-4d41-9221-c5d2731510c1" (UID: "19fbc54f-2695-4d41-9221-c5d2731510c1"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.144366 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "19fbc54f-2695-4d41-9221-c5d2731510c1" (UID: "19fbc54f-2695-4d41-9221-c5d2731510c1"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.146718 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-inventory" (OuterVolumeSpecName: "inventory") pod "19fbc54f-2695-4d41-9221-c5d2731510c1" (UID: "19fbc54f-2695-4d41-9221-c5d2731510c1"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.150087 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "19fbc54f-2695-4d41-9221-c5d2731510c1" (UID: "19fbc54f-2695-4d41-9221-c5d2731510c1"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.153639 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "19fbc54f-2695-4d41-9221-c5d2731510c1" (UID: "19fbc54f-2695-4d41-9221-c5d2731510c1"). InnerVolumeSpecName "ceilometer-compute-config-data-2". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.212960 4706 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.212999 4706 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.213019 4706 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-inventory\") on node \"crc\" DevicePath \"\"" Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.213031 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7mp84\" (UniqueName: \"kubernetes.io/projected/19fbc54f-2695-4d41-9221-c5d2731510c1-kube-api-access-7mp84\") on node \"crc\" DevicePath \"\"" Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.213042 4706 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.213065 4706 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.213073 4706 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/19fbc54f-2695-4d41-9221-c5d2731510c1-ssh-key\") on node \"crc\" DevicePath \"\"" Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.643331 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" event={"ID":"19fbc54f-2695-4d41-9221-c5d2731510c1","Type":"ContainerDied","Data":"4d6767f6c4e5867cdb6dddd9bb919266f1e841d89a98daccbba774b1a655c0e7"} Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.643688 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d6767f6c4e5867cdb6dddd9bb919266f1e841d89a98daccbba774b1a655c0e7" Dec 06 06:14:20 crc kubenswrapper[4706]: I1206 06:14:20.643391 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb" Dec 06 06:14:24 crc kubenswrapper[4706]: I1206 06:14:24.035865 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:14:24 crc kubenswrapper[4706]: E1206 06:14:24.036498 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:14:35 crc kubenswrapper[4706]: I1206 06:14:35.036228 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:14:35 crc kubenswrapper[4706]: E1206 06:14:35.037177 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:14:46 crc kubenswrapper[4706]: I1206 06:14:46.036770 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:14:46 crc kubenswrapper[4706]: E1206 06:14:46.037570 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:14:58 crc kubenswrapper[4706]: I1206 06:14:58.044772 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:14:58 crc kubenswrapper[4706]: E1206 06:14:58.047584 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:15:00 crc kubenswrapper[4706]: I1206 06:15:00.155752 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416695-ssqbc"] Dec 06 06:15:00 crc kubenswrapper[4706]: E1206 06:15:00.156294 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19fbc54f-2695-4d41-9221-c5d2731510c1" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Dec 06 06:15:00 crc kubenswrapper[4706]: I1206 06:15:00.156310 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="19fbc54f-2695-4d41-9221-c5d2731510c1" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Dec 06 06:15:00 crc kubenswrapper[4706]: I1206 06:15:00.156503 4706 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="19fbc54f-2695-4d41-9221-c5d2731510c1" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Dec 06 06:15:00 crc kubenswrapper[4706]: I1206 06:15:00.157302 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416695-ssqbc" Dec 06 06:15:00 crc kubenswrapper[4706]: I1206 06:15:00.159716 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 06 06:15:00 crc kubenswrapper[4706]: I1206 06:15:00.159930 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 06 06:15:00 crc kubenswrapper[4706]: I1206 06:15:00.164652 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416695-ssqbc"] Dec 06 06:15:00 crc kubenswrapper[4706]: I1206 06:15:00.213148 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49b900bd-df97-479a-9af4-bf6af9353f91-config-volume\") pod \"collect-profiles-29416695-ssqbc\" (UID: \"49b900bd-df97-479a-9af4-bf6af9353f91\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416695-ssqbc" Dec 06 06:15:00 crc kubenswrapper[4706]: I1206 06:15:00.213257 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhq6n\" (UniqueName: \"kubernetes.io/projected/49b900bd-df97-479a-9af4-bf6af9353f91-kube-api-access-zhq6n\") pod \"collect-profiles-29416695-ssqbc\" (UID: \"49b900bd-df97-479a-9af4-bf6af9353f91\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416695-ssqbc" Dec 06 06:15:00 crc kubenswrapper[4706]: I1206 06:15:00.213332 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49b900bd-df97-479a-9af4-bf6af9353f91-secret-volume\") pod \"collect-profiles-29416695-ssqbc\" (UID: \"49b900bd-df97-479a-9af4-bf6af9353f91\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416695-ssqbc" Dec 06 06:15:00 crc kubenswrapper[4706]: I1206 06:15:00.314851 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49b900bd-df97-479a-9af4-bf6af9353f91-config-volume\") pod \"collect-profiles-29416695-ssqbc\" (UID: \"49b900bd-df97-479a-9af4-bf6af9353f91\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416695-ssqbc" Dec 06 06:15:00 crc kubenswrapper[4706]: I1206 06:15:00.314913 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhq6n\" (UniqueName: \"kubernetes.io/projected/49b900bd-df97-479a-9af4-bf6af9353f91-kube-api-access-zhq6n\") pod \"collect-profiles-29416695-ssqbc\" (UID: \"49b900bd-df97-479a-9af4-bf6af9353f91\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416695-ssqbc" Dec 06 06:15:00 crc kubenswrapper[4706]: I1206 06:15:00.314940 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49b900bd-df97-479a-9af4-bf6af9353f91-secret-volume\") pod \"collect-profiles-29416695-ssqbc\" (UID: \"49b900bd-df97-479a-9af4-bf6af9353f91\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416695-ssqbc" Dec 06 06:15:00 crc 
kubenswrapper[4706]: I1206 06:15:00.315859 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49b900bd-df97-479a-9af4-bf6af9353f91-config-volume\") pod \"collect-profiles-29416695-ssqbc\" (UID: \"49b900bd-df97-479a-9af4-bf6af9353f91\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416695-ssqbc" Dec 06 06:15:00 crc kubenswrapper[4706]: I1206 06:15:00.326849 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49b900bd-df97-479a-9af4-bf6af9353f91-secret-volume\") pod \"collect-profiles-29416695-ssqbc\" (UID: \"49b900bd-df97-479a-9af4-bf6af9353f91\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416695-ssqbc" Dec 06 06:15:00 crc kubenswrapper[4706]: I1206 06:15:00.335611 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhq6n\" (UniqueName: \"kubernetes.io/projected/49b900bd-df97-479a-9af4-bf6af9353f91-kube-api-access-zhq6n\") pod \"collect-profiles-29416695-ssqbc\" (UID: \"49b900bd-df97-479a-9af4-bf6af9353f91\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416695-ssqbc" Dec 06 06:15:00 crc kubenswrapper[4706]: I1206 06:15:00.485470 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416695-ssqbc" Dec 06 06:15:00 crc kubenswrapper[4706]: I1206 06:15:00.918264 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416695-ssqbc"] Dec 06 06:15:01 crc kubenswrapper[4706]: I1206 06:15:01.129914 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416695-ssqbc" event={"ID":"49b900bd-df97-479a-9af4-bf6af9353f91","Type":"ContainerStarted","Data":"407ae1f933ac87ddb9697cfebc2c423a51af97b63e864d8cc2e31cf358457aa2"} Dec 06 06:15:06 crc kubenswrapper[4706]: I1206 06:15:06.178945 4706 generic.go:334] "Generic (PLEG): container finished" podID="49b900bd-df97-479a-9af4-bf6af9353f91" containerID="9cf91811d64e983fe05300c58265c4ef5aafd7338657f949cdc40ef37b3d60d3" exitCode=0 Dec 06 06:15:06 crc kubenswrapper[4706]: I1206 06:15:06.179057 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416695-ssqbc" event={"ID":"49b900bd-df97-479a-9af4-bf6af9353f91","Type":"ContainerDied","Data":"9cf91811d64e983fe05300c58265c4ef5aafd7338657f949cdc40ef37b3d60d3"} Dec 06 06:15:07 crc kubenswrapper[4706]: I1206 06:15:07.537785 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416695-ssqbc" Dec 06 06:15:07 crc kubenswrapper[4706]: I1206 06:15:07.693413 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49b900bd-df97-479a-9af4-bf6af9353f91-secret-volume\") pod \"49b900bd-df97-479a-9af4-bf6af9353f91\" (UID: \"49b900bd-df97-479a-9af4-bf6af9353f91\") " Dec 06 06:15:07 crc kubenswrapper[4706]: I1206 06:15:07.693844 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49b900bd-df97-479a-9af4-bf6af9353f91-config-volume\") pod \"49b900bd-df97-479a-9af4-bf6af9353f91\" (UID: \"49b900bd-df97-479a-9af4-bf6af9353f91\") " Dec 06 06:15:07 crc kubenswrapper[4706]: I1206 06:15:07.693889 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zhq6n\" (UniqueName: \"kubernetes.io/projected/49b900bd-df97-479a-9af4-bf6af9353f91-kube-api-access-zhq6n\") pod \"49b900bd-df97-479a-9af4-bf6af9353f91\" (UID: \"49b900bd-df97-479a-9af4-bf6af9353f91\") " Dec 06 06:15:07 crc kubenswrapper[4706]: I1206 06:15:07.694708 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49b900bd-df97-479a-9af4-bf6af9353f91-config-volume" (OuterVolumeSpecName: "config-volume") pod "49b900bd-df97-479a-9af4-bf6af9353f91" (UID: "49b900bd-df97-479a-9af4-bf6af9353f91"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 06:15:07 crc kubenswrapper[4706]: I1206 06:15:07.699501 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49b900bd-df97-479a-9af4-bf6af9353f91-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "49b900bd-df97-479a-9af4-bf6af9353f91" (UID: "49b900bd-df97-479a-9af4-bf6af9353f91"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:15:07 crc kubenswrapper[4706]: I1206 06:15:07.709204 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49b900bd-df97-479a-9af4-bf6af9353f91-kube-api-access-zhq6n" (OuterVolumeSpecName: "kube-api-access-zhq6n") pod "49b900bd-df97-479a-9af4-bf6af9353f91" (UID: "49b900bd-df97-479a-9af4-bf6af9353f91"). InnerVolumeSpecName "kube-api-access-zhq6n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:15:07 crc kubenswrapper[4706]: I1206 06:15:07.796608 4706 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/49b900bd-df97-479a-9af4-bf6af9353f91-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 06 06:15:07 crc kubenswrapper[4706]: I1206 06:15:07.796646 4706 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/49b900bd-df97-479a-9af4-bf6af9353f91-config-volume\") on node \"crc\" DevicePath \"\"" Dec 06 06:15:07 crc kubenswrapper[4706]: I1206 06:15:07.796662 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zhq6n\" (UniqueName: \"kubernetes.io/projected/49b900bd-df97-479a-9af4-bf6af9353f91-kube-api-access-zhq6n\") on node \"crc\" DevicePath \"\"" Dec 06 06:15:08 crc kubenswrapper[4706]: I1206 06:15:08.196366 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416695-ssqbc" event={"ID":"49b900bd-df97-479a-9af4-bf6af9353f91","Type":"ContainerDied","Data":"407ae1f933ac87ddb9697cfebc2c423a51af97b63e864d8cc2e31cf358457aa2"} Dec 06 06:15:08 crc kubenswrapper[4706]: I1206 06:15:08.196409 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416695-ssqbc" Dec 06 06:15:08 crc kubenswrapper[4706]: I1206 06:15:08.196420 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="407ae1f933ac87ddb9697cfebc2c423a51af97b63e864d8cc2e31cf358457aa2" Dec 06 06:15:08 crc kubenswrapper[4706]: I1206 06:15:08.610892 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj"] Dec 06 06:15:08 crc kubenswrapper[4706]: I1206 06:15:08.619113 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416650-fxvdj"] Dec 06 06:15:09 crc kubenswrapper[4706]: I1206 06:15:09.036421 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:15:09 crc kubenswrapper[4706]: E1206 06:15:09.036740 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:15:10 crc kubenswrapper[4706]: I1206 06:15:10.048179 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8e37b91-30b0-44fe-96d5-e01d222993b8" path="/var/lib/kubelet/pods/a8e37b91-30b0-44fe-96d5-e01d222993b8/volumes" Dec 06 06:15:23 crc kubenswrapper[4706]: I1206 06:15:23.036647 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:15:23 crc kubenswrapper[4706]: E1206 06:15:23.037841 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:15:34 crc kubenswrapper[4706]: I1206 06:15:34.035964 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:15:34 crc kubenswrapper[4706]: E1206 06:15:34.036954 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.654795 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Dec 06 06:15:36 crc kubenswrapper[4706]: E1206 06:15:36.655919 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49b900bd-df97-479a-9af4-bf6af9353f91" containerName="collect-profiles" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.655938 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="49b900bd-df97-479a-9af4-bf6af9353f91" containerName="collect-profiles" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.656295 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="49b900bd-df97-479a-9af4-bf6af9353f91" containerName="collect-profiles" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.657118 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.659030 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.659079 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.660270 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.661349 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-54qsk" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.664812 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.708296 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/53ac9b54-4c61-4101-96d0-c247c09c0cdd-config-data\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.708603 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/53ac9b54-4c61-4101-96d0-c247c09c0cdd-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.708722 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/53ac9b54-4c61-4101-96d0-c247c09c0cdd-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.811061 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/53ac9b54-4c61-4101-96d0-c247c09c0cdd-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.811148 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/53ac9b54-4c61-4101-96d0-c247c09c0cdd-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.811239 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rf82n\" (UniqueName: \"kubernetes.io/projected/53ac9b54-4c61-4101-96d0-c247c09c0cdd-kube-api-access-rf82n\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.811302 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/53ac9b54-4c61-4101-96d0-c247c09c0cdd-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.811336 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.811357 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53ac9b54-4c61-4101-96d0-c247c09c0cdd-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.811383 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/53ac9b54-4c61-4101-96d0-c247c09c0cdd-config-data\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.811571 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/53ac9b54-4c61-4101-96d0-c247c09c0cdd-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.811657 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" 
(UniqueName: \"kubernetes.io/empty-dir/53ac9b54-4c61-4101-96d0-c247c09c0cdd-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.812291 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/53ac9b54-4c61-4101-96d0-c247c09c0cdd-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.812587 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/53ac9b54-4c61-4101-96d0-c247c09c0cdd-config-data\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.821998 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/53ac9b54-4c61-4101-96d0-c247c09c0cdd-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.913474 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rf82n\" (UniqueName: \"kubernetes.io/projected/53ac9b54-4c61-4101-96d0-c247c09c0cdd-kube-api-access-rf82n\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.913565 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/53ac9b54-4c61-4101-96d0-c247c09c0cdd-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.913599 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.913621 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53ac9b54-4c61-4101-96d0-c247c09c0cdd-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.913667 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/53ac9b54-4c61-4101-96d0-c247c09c0cdd-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.913693 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/53ac9b54-4c61-4101-96d0-c247c09c0cdd-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: 
\"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.913891 4706 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.914556 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/53ac9b54-4c61-4101-96d0-c247c09c0cdd-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.914644 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/53ac9b54-4c61-4101-96d0-c247c09c0cdd-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.917745 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/53ac9b54-4c61-4101-96d0-c247c09c0cdd-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.921214 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53ac9b54-4c61-4101-96d0-c247c09c0cdd-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.931121 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rf82n\" (UniqueName: \"kubernetes.io/projected/53ac9b54-4c61-4101-96d0-c247c09c0cdd-kube-api-access-rf82n\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.945743 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"tempest-tests-tempest\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") " pod="openstack/tempest-tests-tempest" Dec 06 06:15:36 crc kubenswrapper[4706]: I1206 06:15:36.980488 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Dec 06 06:15:37 crc kubenswrapper[4706]: I1206 06:15:37.478020 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Dec 06 06:15:38 crc kubenswrapper[4706]: I1206 06:15:38.486346 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"53ac9b54-4c61-4101-96d0-c247c09c0cdd","Type":"ContainerStarted","Data":"7c1170816c2c2c1df98e36760e69b13eef40684bb3cec5394a50136ce67766ce"} Dec 06 06:15:46 crc kubenswrapper[4706]: I1206 06:15:46.036917 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:15:46 crc kubenswrapper[4706]: E1206 06:15:46.037916 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:15:48 crc kubenswrapper[4706]: I1206 06:15:48.920185 4706 scope.go:117] "RemoveContainer" containerID="75f1758be77ed1bb0894a18feb0b5c390ebc10e1bf6ea21b36c816ae5621007c" Dec 06 06:16:01 crc kubenswrapper[4706]: I1206 06:16:01.039282 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:16:01 crc kubenswrapper[4706]: E1206 06:16:01.040085 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:16:12 crc kubenswrapper[4706]: E1206 06:16:12.390302 4706 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Dec 06 06:16:12 crc kubenswrapper[4706]: E1206 06:16:12.391016 4706 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rf82n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(53ac9b54-4c61-4101-96d0-c247c09c0cdd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Dec 06 06:16:12 crc kubenswrapper[4706]: E1206 06:16:12.392227 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" 
podUID="53ac9b54-4c61-4101-96d0-c247c09c0cdd" Dec 06 06:16:12 crc kubenswrapper[4706]: E1206 06:16:12.855168 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="53ac9b54-4c61-4101-96d0-c247c09c0cdd" Dec 06 06:16:16 crc kubenswrapper[4706]: I1206 06:16:16.036918 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:16:16 crc kubenswrapper[4706]: E1206 06:16:16.037553 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:16:27 crc kubenswrapper[4706]: I1206 06:16:27.455892 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Dec 06 06:16:28 crc kubenswrapper[4706]: I1206 06:16:28.042639 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:16:28 crc kubenswrapper[4706]: E1206 06:16:28.043123 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:16:29 crc kubenswrapper[4706]: I1206 06:16:29.011593 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"53ac9b54-4c61-4101-96d0-c247c09c0cdd","Type":"ContainerStarted","Data":"33c54448ca700ee4eda61c381b8993e2330d7033306a5815a197c8665982555f"} Dec 06 06:16:29 crc kubenswrapper[4706]: I1206 06:16:29.027133 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=4.064189437 podStartE2EDuration="54.027116021s" podCreationTimestamp="2025-12-06 06:15:35 +0000 UTC" firstStartedPulling="2025-12-06 06:15:37.490947843 +0000 UTC m=+3359.818771787" lastFinishedPulling="2025-12-06 06:16:27.453874427 +0000 UTC m=+3409.781698371" observedRunningTime="2025-12-06 06:16:29.026830774 +0000 UTC m=+3411.354654718" watchObservedRunningTime="2025-12-06 06:16:29.027116021 +0000 UTC m=+3411.354939965" Dec 06 06:16:41 crc kubenswrapper[4706]: I1206 06:16:41.036410 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:16:41 crc kubenswrapper[4706]: E1206 06:16:41.037173 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" 
podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:16:56 crc kubenswrapper[4706]: I1206 06:16:56.036478 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:16:56 crc kubenswrapper[4706]: E1206 06:16:56.037232 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:17:07 crc kubenswrapper[4706]: I1206 06:17:07.036015 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:17:09 crc kubenswrapper[4706]: I1206 06:17:09.416191 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"2e220d293f72c512167c6413d3e7480ac1155249a53e5125771dd174ef92c8c2"} Dec 06 06:18:10 crc kubenswrapper[4706]: I1206 06:18:10.578457 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-n7rvn"] Dec 06 06:18:10 crc kubenswrapper[4706]: I1206 06:18:10.583679 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n7rvn" Dec 06 06:18:10 crc kubenswrapper[4706]: I1206 06:18:10.615521 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n7rvn"] Dec 06 06:18:10 crc kubenswrapper[4706]: I1206 06:18:10.706305 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b027584d-500a-4820-9afd-c0459abeb8b4-catalog-content\") pod \"redhat-marketplace-n7rvn\" (UID: \"b027584d-500a-4820-9afd-c0459abeb8b4\") " pod="openshift-marketplace/redhat-marketplace-n7rvn" Dec 06 06:18:10 crc kubenswrapper[4706]: I1206 06:18:10.706386 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b027584d-500a-4820-9afd-c0459abeb8b4-utilities\") pod \"redhat-marketplace-n7rvn\" (UID: \"b027584d-500a-4820-9afd-c0459abeb8b4\") " pod="openshift-marketplace/redhat-marketplace-n7rvn" Dec 06 06:18:10 crc kubenswrapper[4706]: I1206 06:18:10.706611 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldj95\" (UniqueName: \"kubernetes.io/projected/b027584d-500a-4820-9afd-c0459abeb8b4-kube-api-access-ldj95\") pod \"redhat-marketplace-n7rvn\" (UID: \"b027584d-500a-4820-9afd-c0459abeb8b4\") " pod="openshift-marketplace/redhat-marketplace-n7rvn" Dec 06 06:18:10 crc kubenswrapper[4706]: I1206 06:18:10.808995 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b027584d-500a-4820-9afd-c0459abeb8b4-utilities\") pod \"redhat-marketplace-n7rvn\" (UID: \"b027584d-500a-4820-9afd-c0459abeb8b4\") " pod="openshift-marketplace/redhat-marketplace-n7rvn" Dec 06 06:18:10 crc kubenswrapper[4706]: I1206 06:18:10.809253 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-ldj95\" (UniqueName: \"kubernetes.io/projected/b027584d-500a-4820-9afd-c0459abeb8b4-kube-api-access-ldj95\") pod \"redhat-marketplace-n7rvn\" (UID: \"b027584d-500a-4820-9afd-c0459abeb8b4\") " pod="openshift-marketplace/redhat-marketplace-n7rvn" Dec 06 06:18:10 crc kubenswrapper[4706]: I1206 06:18:10.809469 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b027584d-500a-4820-9afd-c0459abeb8b4-catalog-content\") pod \"redhat-marketplace-n7rvn\" (UID: \"b027584d-500a-4820-9afd-c0459abeb8b4\") " pod="openshift-marketplace/redhat-marketplace-n7rvn" Dec 06 06:18:10 crc kubenswrapper[4706]: I1206 06:18:10.809751 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b027584d-500a-4820-9afd-c0459abeb8b4-utilities\") pod \"redhat-marketplace-n7rvn\" (UID: \"b027584d-500a-4820-9afd-c0459abeb8b4\") " pod="openshift-marketplace/redhat-marketplace-n7rvn" Dec 06 06:18:10 crc kubenswrapper[4706]: I1206 06:18:10.810264 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b027584d-500a-4820-9afd-c0459abeb8b4-catalog-content\") pod \"redhat-marketplace-n7rvn\" (UID: \"b027584d-500a-4820-9afd-c0459abeb8b4\") " pod="openshift-marketplace/redhat-marketplace-n7rvn" Dec 06 06:18:10 crc kubenswrapper[4706]: I1206 06:18:10.830594 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ldj95\" (UniqueName: \"kubernetes.io/projected/b027584d-500a-4820-9afd-c0459abeb8b4-kube-api-access-ldj95\") pod \"redhat-marketplace-n7rvn\" (UID: \"b027584d-500a-4820-9afd-c0459abeb8b4\") " pod="openshift-marketplace/redhat-marketplace-n7rvn" Dec 06 06:18:10 crc kubenswrapper[4706]: I1206 06:18:10.909378 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n7rvn" Dec 06 06:18:11 crc kubenswrapper[4706]: I1206 06:18:11.387256 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n7rvn"] Dec 06 06:18:11 crc kubenswrapper[4706]: W1206 06:18:11.387891 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb027584d_500a_4820_9afd_c0459abeb8b4.slice/crio-9f1815d71db83f147f0d0b888e0bcff85153928b8c4fd46542b29e33f581393b WatchSource:0}: Error finding container 9f1815d71db83f147f0d0b888e0bcff85153928b8c4fd46542b29e33f581393b: Status 404 returned error can't find the container with id 9f1815d71db83f147f0d0b888e0bcff85153928b8c4fd46542b29e33f581393b Dec 06 06:18:11 crc kubenswrapper[4706]: I1206 06:18:11.961637 4706 generic.go:334] "Generic (PLEG): container finished" podID="b027584d-500a-4820-9afd-c0459abeb8b4" containerID="647cb49731dd1a958d628afe3c11af5c95e2155bd5b414454beaa1ff5439aded" exitCode=0 Dec 06 06:18:11 crc kubenswrapper[4706]: I1206 06:18:11.961743 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n7rvn" event={"ID":"b027584d-500a-4820-9afd-c0459abeb8b4","Type":"ContainerDied","Data":"647cb49731dd1a958d628afe3c11af5c95e2155bd5b414454beaa1ff5439aded"} Dec 06 06:18:11 crc kubenswrapper[4706]: I1206 06:18:11.961992 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n7rvn" event={"ID":"b027584d-500a-4820-9afd-c0459abeb8b4","Type":"ContainerStarted","Data":"9f1815d71db83f147f0d0b888e0bcff85153928b8c4fd46542b29e33f581393b"} Dec 06 06:18:11 crc kubenswrapper[4706]: I1206 06:18:11.964325 4706 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 06 06:18:12 crc kubenswrapper[4706]: I1206 06:18:12.973182 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n7rvn" event={"ID":"b027584d-500a-4820-9afd-c0459abeb8b4","Type":"ContainerStarted","Data":"e217da3cc04f5b4fff95b7188da79952b6a36a3c2226803c0c317f17c2c9d89e"} Dec 06 06:18:13 crc kubenswrapper[4706]: I1206 06:18:13.987086 4706 generic.go:334] "Generic (PLEG): container finished" podID="b027584d-500a-4820-9afd-c0459abeb8b4" containerID="e217da3cc04f5b4fff95b7188da79952b6a36a3c2226803c0c317f17c2c9d89e" exitCode=0 Dec 06 06:18:13 crc kubenswrapper[4706]: I1206 06:18:13.987137 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n7rvn" event={"ID":"b027584d-500a-4820-9afd-c0459abeb8b4","Type":"ContainerDied","Data":"e217da3cc04f5b4fff95b7188da79952b6a36a3c2226803c0c317f17c2c9d89e"} Dec 06 06:18:15 crc kubenswrapper[4706]: I1206 06:18:15.009287 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n7rvn" event={"ID":"b027584d-500a-4820-9afd-c0459abeb8b4","Type":"ContainerStarted","Data":"562ac639b5f98bd0847779f944e4e59fb308f1a11e1f0e10c2c452c390eb057c"} Dec 06 06:18:15 crc kubenswrapper[4706]: I1206 06:18:15.034357 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-n7rvn" podStartSLOduration=2.595672745 podStartE2EDuration="5.034332761s" podCreationTimestamp="2025-12-06 06:18:10 +0000 UTC" firstStartedPulling="2025-12-06 06:18:11.964113338 +0000 UTC m=+3514.291937282" lastFinishedPulling="2025-12-06 06:18:14.402773364 +0000 UTC m=+3516.730597298" 
observedRunningTime="2025-12-06 06:18:15.027449324 +0000 UTC m=+3517.355273288" watchObservedRunningTime="2025-12-06 06:18:15.034332761 +0000 UTC m=+3517.362156705" Dec 06 06:18:20 crc kubenswrapper[4706]: I1206 06:18:20.751397 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7dkk5"] Dec 06 06:18:20 crc kubenswrapper[4706]: I1206 06:18:20.753997 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7dkk5" Dec 06 06:18:20 crc kubenswrapper[4706]: I1206 06:18:20.816133 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7dkk5"] Dec 06 06:18:20 crc kubenswrapper[4706]: I1206 06:18:20.893878 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7626c09f-99f0-4799-b790-deea0ec6d052-catalog-content\") pod \"community-operators-7dkk5\" (UID: \"7626c09f-99f0-4799-b790-deea0ec6d052\") " pod="openshift-marketplace/community-operators-7dkk5" Dec 06 06:18:20 crc kubenswrapper[4706]: I1206 06:18:20.894101 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7626c09f-99f0-4799-b790-deea0ec6d052-utilities\") pod \"community-operators-7dkk5\" (UID: \"7626c09f-99f0-4799-b790-deea0ec6d052\") " pod="openshift-marketplace/community-operators-7dkk5" Dec 06 06:18:20 crc kubenswrapper[4706]: I1206 06:18:20.894135 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7lt2b\" (UniqueName: \"kubernetes.io/projected/7626c09f-99f0-4799-b790-deea0ec6d052-kube-api-access-7lt2b\") pod \"community-operators-7dkk5\" (UID: \"7626c09f-99f0-4799-b790-deea0ec6d052\") " pod="openshift-marketplace/community-operators-7dkk5" Dec 06 06:18:20 crc kubenswrapper[4706]: I1206 06:18:20.910478 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-n7rvn" Dec 06 06:18:20 crc kubenswrapper[4706]: I1206 06:18:20.910530 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-n7rvn" Dec 06 06:18:20 crc kubenswrapper[4706]: I1206 06:18:20.970913 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-n7rvn" Dec 06 06:18:20 crc kubenswrapper[4706]: I1206 06:18:20.995744 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7626c09f-99f0-4799-b790-deea0ec6d052-utilities\") pod \"community-operators-7dkk5\" (UID: \"7626c09f-99f0-4799-b790-deea0ec6d052\") " pod="openshift-marketplace/community-operators-7dkk5" Dec 06 06:18:20 crc kubenswrapper[4706]: I1206 06:18:20.995785 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7lt2b\" (UniqueName: \"kubernetes.io/projected/7626c09f-99f0-4799-b790-deea0ec6d052-kube-api-access-7lt2b\") pod \"community-operators-7dkk5\" (UID: \"7626c09f-99f0-4799-b790-deea0ec6d052\") " pod="openshift-marketplace/community-operators-7dkk5" Dec 06 06:18:20 crc kubenswrapper[4706]: I1206 06:18:20.995847 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/7626c09f-99f0-4799-b790-deea0ec6d052-catalog-content\") pod \"community-operators-7dkk5\" (UID: \"7626c09f-99f0-4799-b790-deea0ec6d052\") " pod="openshift-marketplace/community-operators-7dkk5" Dec 06 06:18:20 crc kubenswrapper[4706]: I1206 06:18:20.996418 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7626c09f-99f0-4799-b790-deea0ec6d052-catalog-content\") pod \"community-operators-7dkk5\" (UID: \"7626c09f-99f0-4799-b790-deea0ec6d052\") " pod="openshift-marketplace/community-operators-7dkk5" Dec 06 06:18:20 crc kubenswrapper[4706]: I1206 06:18:20.997666 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7626c09f-99f0-4799-b790-deea0ec6d052-utilities\") pod \"community-operators-7dkk5\" (UID: \"7626c09f-99f0-4799-b790-deea0ec6d052\") " pod="openshift-marketplace/community-operators-7dkk5" Dec 06 06:18:21 crc kubenswrapper[4706]: I1206 06:18:21.016288 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lt2b\" (UniqueName: \"kubernetes.io/projected/7626c09f-99f0-4799-b790-deea0ec6d052-kube-api-access-7lt2b\") pod \"community-operators-7dkk5\" (UID: \"7626c09f-99f0-4799-b790-deea0ec6d052\") " pod="openshift-marketplace/community-operators-7dkk5" Dec 06 06:18:21 crc kubenswrapper[4706]: I1206 06:18:21.124029 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7dkk5" Dec 06 06:18:21 crc kubenswrapper[4706]: I1206 06:18:21.171480 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-n7rvn" Dec 06 06:18:21 crc kubenswrapper[4706]: I1206 06:18:21.621545 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7dkk5"] Dec 06 06:18:22 crc kubenswrapper[4706]: I1206 06:18:22.091193 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7dkk5" event={"ID":"7626c09f-99f0-4799-b790-deea0ec6d052","Type":"ContainerStarted","Data":"056565eac78771c6a6b7a124efb0efa66d9ee37a8c5b2f101571578a7418fcaa"} Dec 06 06:18:23 crc kubenswrapper[4706]: I1206 06:18:23.100031 4706 generic.go:334] "Generic (PLEG): container finished" podID="7626c09f-99f0-4799-b790-deea0ec6d052" containerID="4137052bb9e3697d9801eaee60d5a0ae4ff174344d314e5fb8561593b7717e81" exitCode=0 Dec 06 06:18:23 crc kubenswrapper[4706]: I1206 06:18:23.100121 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7dkk5" event={"ID":"7626c09f-99f0-4799-b790-deea0ec6d052","Type":"ContainerDied","Data":"4137052bb9e3697d9801eaee60d5a0ae4ff174344d314e5fb8561593b7717e81"} Dec 06 06:18:23 crc kubenswrapper[4706]: I1206 06:18:23.328598 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n7rvn"] Dec 06 06:18:23 crc kubenswrapper[4706]: I1206 06:18:23.329275 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-n7rvn" podUID="b027584d-500a-4820-9afd-c0459abeb8b4" containerName="registry-server" containerID="cri-o://562ac639b5f98bd0847779f944e4e59fb308f1a11e1f0e10c2c452c390eb057c" gracePeriod=2 Dec 06 06:18:24 crc kubenswrapper[4706]: I1206 06:18:24.124119 4706 generic.go:334] "Generic (PLEG): container finished" podID="b027584d-500a-4820-9afd-c0459abeb8b4" 
containerID="562ac639b5f98bd0847779f944e4e59fb308f1a11e1f0e10c2c452c390eb057c" exitCode=0 Dec 06 06:18:24 crc kubenswrapper[4706]: I1206 06:18:24.124360 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n7rvn" event={"ID":"b027584d-500a-4820-9afd-c0459abeb8b4","Type":"ContainerDied","Data":"562ac639b5f98bd0847779f944e4e59fb308f1a11e1f0e10c2c452c390eb057c"} Dec 06 06:18:24 crc kubenswrapper[4706]: I1206 06:18:24.129821 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7dkk5" event={"ID":"7626c09f-99f0-4799-b790-deea0ec6d052","Type":"ContainerStarted","Data":"7d91a118e47c3bf32c2498928b80418cf10046e855f94e7a21122dbc99061d6a"} Dec 06 06:18:24 crc kubenswrapper[4706]: I1206 06:18:24.392108 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n7rvn" Dec 06 06:18:24 crc kubenswrapper[4706]: I1206 06:18:24.579678 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b027584d-500a-4820-9afd-c0459abeb8b4-utilities\") pod \"b027584d-500a-4820-9afd-c0459abeb8b4\" (UID: \"b027584d-500a-4820-9afd-c0459abeb8b4\") " Dec 06 06:18:24 crc kubenswrapper[4706]: I1206 06:18:24.579794 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b027584d-500a-4820-9afd-c0459abeb8b4-catalog-content\") pod \"b027584d-500a-4820-9afd-c0459abeb8b4\" (UID: \"b027584d-500a-4820-9afd-c0459abeb8b4\") " Dec 06 06:18:24 crc kubenswrapper[4706]: I1206 06:18:24.579909 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ldj95\" (UniqueName: \"kubernetes.io/projected/b027584d-500a-4820-9afd-c0459abeb8b4-kube-api-access-ldj95\") pod \"b027584d-500a-4820-9afd-c0459abeb8b4\" (UID: \"b027584d-500a-4820-9afd-c0459abeb8b4\") " Dec 06 06:18:24 crc kubenswrapper[4706]: I1206 06:18:24.580992 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b027584d-500a-4820-9afd-c0459abeb8b4-utilities" (OuterVolumeSpecName: "utilities") pod "b027584d-500a-4820-9afd-c0459abeb8b4" (UID: "b027584d-500a-4820-9afd-c0459abeb8b4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:18:24 crc kubenswrapper[4706]: I1206 06:18:24.603010 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b027584d-500a-4820-9afd-c0459abeb8b4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b027584d-500a-4820-9afd-c0459abeb8b4" (UID: "b027584d-500a-4820-9afd-c0459abeb8b4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:18:24 crc kubenswrapper[4706]: I1206 06:18:24.608289 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b027584d-500a-4820-9afd-c0459abeb8b4-kube-api-access-ldj95" (OuterVolumeSpecName: "kube-api-access-ldj95") pod "b027584d-500a-4820-9afd-c0459abeb8b4" (UID: "b027584d-500a-4820-9afd-c0459abeb8b4"). InnerVolumeSpecName "kube-api-access-ldj95". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:18:24 crc kubenswrapper[4706]: I1206 06:18:24.682506 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b027584d-500a-4820-9afd-c0459abeb8b4-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 06:18:24 crc kubenswrapper[4706]: I1206 06:18:24.682542 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b027584d-500a-4820-9afd-c0459abeb8b4-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 06:18:24 crc kubenswrapper[4706]: I1206 06:18:24.682553 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ldj95\" (UniqueName: \"kubernetes.io/projected/b027584d-500a-4820-9afd-c0459abeb8b4-kube-api-access-ldj95\") on node \"crc\" DevicePath \"\"" Dec 06 06:18:25 crc kubenswrapper[4706]: I1206 06:18:25.142873 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n7rvn" event={"ID":"b027584d-500a-4820-9afd-c0459abeb8b4","Type":"ContainerDied","Data":"9f1815d71db83f147f0d0b888e0bcff85153928b8c4fd46542b29e33f581393b"} Dec 06 06:18:25 crc kubenswrapper[4706]: I1206 06:18:25.142941 4706 scope.go:117] "RemoveContainer" containerID="562ac639b5f98bd0847779f944e4e59fb308f1a11e1f0e10c2c452c390eb057c" Dec 06 06:18:25 crc kubenswrapper[4706]: I1206 06:18:25.142972 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n7rvn" Dec 06 06:18:25 crc kubenswrapper[4706]: I1206 06:18:25.145309 4706 generic.go:334] "Generic (PLEG): container finished" podID="7626c09f-99f0-4799-b790-deea0ec6d052" containerID="7d91a118e47c3bf32c2498928b80418cf10046e855f94e7a21122dbc99061d6a" exitCode=0 Dec 06 06:18:25 crc kubenswrapper[4706]: I1206 06:18:25.145350 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7dkk5" event={"ID":"7626c09f-99f0-4799-b790-deea0ec6d052","Type":"ContainerDied","Data":"7d91a118e47c3bf32c2498928b80418cf10046e855f94e7a21122dbc99061d6a"} Dec 06 06:18:25 crc kubenswrapper[4706]: I1206 06:18:25.181180 4706 scope.go:117] "RemoveContainer" containerID="e217da3cc04f5b4fff95b7188da79952b6a36a3c2226803c0c317f17c2c9d89e" Dec 06 06:18:25 crc kubenswrapper[4706]: I1206 06:18:25.211566 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n7rvn"] Dec 06 06:18:25 crc kubenswrapper[4706]: I1206 06:18:25.220472 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-n7rvn"] Dec 06 06:18:25 crc kubenswrapper[4706]: I1206 06:18:25.225299 4706 scope.go:117] "RemoveContainer" containerID="647cb49731dd1a958d628afe3c11af5c95e2155bd5b414454beaa1ff5439aded" Dec 06 06:18:26 crc kubenswrapper[4706]: I1206 06:18:26.045637 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b027584d-500a-4820-9afd-c0459abeb8b4" path="/var/lib/kubelet/pods/b027584d-500a-4820-9afd-c0459abeb8b4/volumes" Dec 06 06:18:26 crc kubenswrapper[4706]: I1206 06:18:26.156546 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7dkk5" event={"ID":"7626c09f-99f0-4799-b790-deea0ec6d052","Type":"ContainerStarted","Data":"b27406a6865ad5d50b3093ae70f5c5991dd974d0ebc2c3387ad5e2ba1af1d75f"} Dec 06 06:18:26 crc kubenswrapper[4706]: I1206 06:18:26.172511 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/community-operators-7dkk5" podStartSLOduration=3.695181777 podStartE2EDuration="6.17249394s" podCreationTimestamp="2025-12-06 06:18:20 +0000 UTC" firstStartedPulling="2025-12-06 06:18:23.104085976 +0000 UTC m=+3525.431909920" lastFinishedPulling="2025-12-06 06:18:25.581398149 +0000 UTC m=+3527.909222083" observedRunningTime="2025-12-06 06:18:26.171341089 +0000 UTC m=+3528.499165033" watchObservedRunningTime="2025-12-06 06:18:26.17249394 +0000 UTC m=+3528.500317884" Dec 06 06:18:31 crc kubenswrapper[4706]: I1206 06:18:31.124447 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7dkk5" Dec 06 06:18:31 crc kubenswrapper[4706]: I1206 06:18:31.125339 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7dkk5" Dec 06 06:18:31 crc kubenswrapper[4706]: I1206 06:18:31.175417 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7dkk5" Dec 06 06:18:31 crc kubenswrapper[4706]: I1206 06:18:31.250436 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7dkk5" Dec 06 06:18:31 crc kubenswrapper[4706]: I1206 06:18:31.417029 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7dkk5"] Dec 06 06:18:33 crc kubenswrapper[4706]: I1206 06:18:33.214959 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7dkk5" podUID="7626c09f-99f0-4799-b790-deea0ec6d052" containerName="registry-server" containerID="cri-o://b27406a6865ad5d50b3093ae70f5c5991dd974d0ebc2c3387ad5e2ba1af1d75f" gracePeriod=2 Dec 06 06:18:33 crc kubenswrapper[4706]: I1206 06:18:33.717039 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7dkk5" Dec 06 06:18:33 crc kubenswrapper[4706]: I1206 06:18:33.870630 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7626c09f-99f0-4799-b790-deea0ec6d052-utilities\") pod \"7626c09f-99f0-4799-b790-deea0ec6d052\" (UID: \"7626c09f-99f0-4799-b790-deea0ec6d052\") " Dec 06 06:18:33 crc kubenswrapper[4706]: I1206 06:18:33.870776 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7lt2b\" (UniqueName: \"kubernetes.io/projected/7626c09f-99f0-4799-b790-deea0ec6d052-kube-api-access-7lt2b\") pod \"7626c09f-99f0-4799-b790-deea0ec6d052\" (UID: \"7626c09f-99f0-4799-b790-deea0ec6d052\") " Dec 06 06:18:33 crc kubenswrapper[4706]: I1206 06:18:33.870911 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7626c09f-99f0-4799-b790-deea0ec6d052-catalog-content\") pod \"7626c09f-99f0-4799-b790-deea0ec6d052\" (UID: \"7626c09f-99f0-4799-b790-deea0ec6d052\") " Dec 06 06:18:33 crc kubenswrapper[4706]: I1206 06:18:33.871792 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7626c09f-99f0-4799-b790-deea0ec6d052-utilities" (OuterVolumeSpecName: "utilities") pod "7626c09f-99f0-4799-b790-deea0ec6d052" (UID: "7626c09f-99f0-4799-b790-deea0ec6d052"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:18:33 crc kubenswrapper[4706]: I1206 06:18:33.877062 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7626c09f-99f0-4799-b790-deea0ec6d052-kube-api-access-7lt2b" (OuterVolumeSpecName: "kube-api-access-7lt2b") pod "7626c09f-99f0-4799-b790-deea0ec6d052" (UID: "7626c09f-99f0-4799-b790-deea0ec6d052"). InnerVolumeSpecName "kube-api-access-7lt2b". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:18:33 crc kubenswrapper[4706]: I1206 06:18:33.932960 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7626c09f-99f0-4799-b790-deea0ec6d052-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7626c09f-99f0-4799-b790-deea0ec6d052" (UID: "7626c09f-99f0-4799-b790-deea0ec6d052"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:18:33 crc kubenswrapper[4706]: I1206 06:18:33.973226 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7626c09f-99f0-4799-b790-deea0ec6d052-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 06:18:33 crc kubenswrapper[4706]: I1206 06:18:33.973268 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7626c09f-99f0-4799-b790-deea0ec6d052-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 06:18:33 crc kubenswrapper[4706]: I1206 06:18:33.973280 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7lt2b\" (UniqueName: \"kubernetes.io/projected/7626c09f-99f0-4799-b790-deea0ec6d052-kube-api-access-7lt2b\") on node \"crc\" DevicePath \"\"" Dec 06 06:18:34 crc kubenswrapper[4706]: I1206 06:18:34.224678 4706 generic.go:334] "Generic (PLEG): container finished" podID="7626c09f-99f0-4799-b790-deea0ec6d052" containerID="b27406a6865ad5d50b3093ae70f5c5991dd974d0ebc2c3387ad5e2ba1af1d75f" exitCode=0 Dec 06 06:18:34 crc kubenswrapper[4706]: I1206 06:18:34.224735 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7dkk5" event={"ID":"7626c09f-99f0-4799-b790-deea0ec6d052","Type":"ContainerDied","Data":"b27406a6865ad5d50b3093ae70f5c5991dd974d0ebc2c3387ad5e2ba1af1d75f"} Dec 06 06:18:34 crc kubenswrapper[4706]: I1206 06:18:34.225138 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7dkk5" event={"ID":"7626c09f-99f0-4799-b790-deea0ec6d052","Type":"ContainerDied","Data":"056565eac78771c6a6b7a124efb0efa66d9ee37a8c5b2f101571578a7418fcaa"} Dec 06 06:18:34 crc kubenswrapper[4706]: I1206 06:18:34.225162 4706 scope.go:117] "RemoveContainer" containerID="b27406a6865ad5d50b3093ae70f5c5991dd974d0ebc2c3387ad5e2ba1af1d75f" Dec 06 06:18:34 crc kubenswrapper[4706]: I1206 06:18:34.224776 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7dkk5" Dec 06 06:18:34 crc kubenswrapper[4706]: I1206 06:18:34.255190 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7dkk5"] Dec 06 06:18:34 crc kubenswrapper[4706]: I1206 06:18:34.261782 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7dkk5"] Dec 06 06:18:34 crc kubenswrapper[4706]: I1206 06:18:34.270557 4706 scope.go:117] "RemoveContainer" containerID="7d91a118e47c3bf32c2498928b80418cf10046e855f94e7a21122dbc99061d6a" Dec 06 06:18:34 crc kubenswrapper[4706]: I1206 06:18:34.292919 4706 scope.go:117] "RemoveContainer" containerID="4137052bb9e3697d9801eaee60d5a0ae4ff174344d314e5fb8561593b7717e81" Dec 06 06:18:34 crc kubenswrapper[4706]: I1206 06:18:34.349467 4706 scope.go:117] "RemoveContainer" containerID="b27406a6865ad5d50b3093ae70f5c5991dd974d0ebc2c3387ad5e2ba1af1d75f" Dec 06 06:18:34 crc kubenswrapper[4706]: E1206 06:18:34.349945 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b27406a6865ad5d50b3093ae70f5c5991dd974d0ebc2c3387ad5e2ba1af1d75f\": container with ID starting with b27406a6865ad5d50b3093ae70f5c5991dd974d0ebc2c3387ad5e2ba1af1d75f not found: ID does not exist" containerID="b27406a6865ad5d50b3093ae70f5c5991dd974d0ebc2c3387ad5e2ba1af1d75f" Dec 06 06:18:34 crc kubenswrapper[4706]: I1206 06:18:34.349983 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b27406a6865ad5d50b3093ae70f5c5991dd974d0ebc2c3387ad5e2ba1af1d75f"} err="failed to get container status \"b27406a6865ad5d50b3093ae70f5c5991dd974d0ebc2c3387ad5e2ba1af1d75f\": rpc error: code = NotFound desc = could not find container \"b27406a6865ad5d50b3093ae70f5c5991dd974d0ebc2c3387ad5e2ba1af1d75f\": container with ID starting with b27406a6865ad5d50b3093ae70f5c5991dd974d0ebc2c3387ad5e2ba1af1d75f not found: ID does not exist" Dec 06 06:18:34 crc kubenswrapper[4706]: I1206 06:18:34.350027 4706 scope.go:117] "RemoveContainer" containerID="7d91a118e47c3bf32c2498928b80418cf10046e855f94e7a21122dbc99061d6a" Dec 06 06:18:34 crc kubenswrapper[4706]: E1206 06:18:34.350460 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d91a118e47c3bf32c2498928b80418cf10046e855f94e7a21122dbc99061d6a\": container with ID starting with 7d91a118e47c3bf32c2498928b80418cf10046e855f94e7a21122dbc99061d6a not found: ID does not exist" containerID="7d91a118e47c3bf32c2498928b80418cf10046e855f94e7a21122dbc99061d6a" Dec 06 06:18:34 crc kubenswrapper[4706]: I1206 06:18:34.350484 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d91a118e47c3bf32c2498928b80418cf10046e855f94e7a21122dbc99061d6a"} err="failed to get container status \"7d91a118e47c3bf32c2498928b80418cf10046e855f94e7a21122dbc99061d6a\": rpc error: code = NotFound desc = could not find container \"7d91a118e47c3bf32c2498928b80418cf10046e855f94e7a21122dbc99061d6a\": container with ID starting with 7d91a118e47c3bf32c2498928b80418cf10046e855f94e7a21122dbc99061d6a not found: ID does not exist" Dec 06 06:18:34 crc kubenswrapper[4706]: I1206 06:18:34.350500 4706 scope.go:117] "RemoveContainer" containerID="4137052bb9e3697d9801eaee60d5a0ae4ff174344d314e5fb8561593b7717e81" Dec 06 06:18:34 crc kubenswrapper[4706]: E1206 06:18:34.350905 4706 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"4137052bb9e3697d9801eaee60d5a0ae4ff174344d314e5fb8561593b7717e81\": container with ID starting with 4137052bb9e3697d9801eaee60d5a0ae4ff174344d314e5fb8561593b7717e81 not found: ID does not exist" containerID="4137052bb9e3697d9801eaee60d5a0ae4ff174344d314e5fb8561593b7717e81" Dec 06 06:18:34 crc kubenswrapper[4706]: I1206 06:18:34.350954 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4137052bb9e3697d9801eaee60d5a0ae4ff174344d314e5fb8561593b7717e81"} err="failed to get container status \"4137052bb9e3697d9801eaee60d5a0ae4ff174344d314e5fb8561593b7717e81\": rpc error: code = NotFound desc = could not find container \"4137052bb9e3697d9801eaee60d5a0ae4ff174344d314e5fb8561593b7717e81\": container with ID starting with 4137052bb9e3697d9801eaee60d5a0ae4ff174344d314e5fb8561593b7717e81 not found: ID does not exist" Dec 06 06:18:36 crc kubenswrapper[4706]: I1206 06:18:36.072640 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7626c09f-99f0-4799-b790-deea0ec6d052" path="/var/lib/kubelet/pods/7626c09f-99f0-4799-b790-deea0ec6d052/volumes" Dec 06 06:19:35 crc kubenswrapper[4706]: I1206 06:19:35.961961 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 06:19:35 crc kubenswrapper[4706]: I1206 06:19:35.962854 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 06:20:05 crc kubenswrapper[4706]: I1206 06:20:05.961075 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 06:20:05 crc kubenswrapper[4706]: I1206 06:20:05.961835 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 06:20:35 crc kubenswrapper[4706]: I1206 06:20:35.961708 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 06:20:35 crc kubenswrapper[4706]: I1206 06:20:35.962385 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 06:20:35 crc kubenswrapper[4706]: I1206 06:20:35.962430 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 06:20:35 crc kubenswrapper[4706]: I1206 06:20:35.963118 4706 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2e220d293f72c512167c6413d3e7480ac1155249a53e5125771dd174ef92c8c2"} pod="openshift-machine-config-operator/machine-config-daemon-z27rn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 06 06:20:35 crc kubenswrapper[4706]: I1206 06:20:35.963168 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" containerID="cri-o://2e220d293f72c512167c6413d3e7480ac1155249a53e5125771dd174ef92c8c2" gracePeriod=600 Dec 06 06:20:36 crc kubenswrapper[4706]: I1206 06:20:36.292482 4706 generic.go:334] "Generic (PLEG): container finished" podID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerID="2e220d293f72c512167c6413d3e7480ac1155249a53e5125771dd174ef92c8c2" exitCode=0 Dec 06 06:20:36 crc kubenswrapper[4706]: I1206 06:20:36.292528 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerDied","Data":"2e220d293f72c512167c6413d3e7480ac1155249a53e5125771dd174ef92c8c2"} Dec 06 06:20:36 crc kubenswrapper[4706]: I1206 06:20:36.292565 4706 scope.go:117] "RemoveContainer" containerID="9a2c0a11f83490d2bf34ec72bce2527d8cb46fd5bdf1f4eb641238f5f5fd25bc" Dec 06 06:20:37 crc kubenswrapper[4706]: I1206 06:20:37.304605 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"} Dec 06 06:21:42 crc kubenswrapper[4706]: I1206 06:21:42.755395 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-p6zl7"] Dec 06 06:21:42 crc kubenswrapper[4706]: E1206 06:21:42.757248 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b027584d-500a-4820-9afd-c0459abeb8b4" containerName="registry-server" Dec 06 06:21:42 crc kubenswrapper[4706]: I1206 06:21:42.757268 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="b027584d-500a-4820-9afd-c0459abeb8b4" containerName="registry-server" Dec 06 06:21:42 crc kubenswrapper[4706]: E1206 06:21:42.757295 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7626c09f-99f0-4799-b790-deea0ec6d052" containerName="extract-utilities" Dec 06 06:21:42 crc kubenswrapper[4706]: I1206 06:21:42.757303 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="7626c09f-99f0-4799-b790-deea0ec6d052" containerName="extract-utilities" Dec 06 06:21:42 crc kubenswrapper[4706]: E1206 06:21:42.757334 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7626c09f-99f0-4799-b790-deea0ec6d052" containerName="registry-server" Dec 06 06:21:42 crc kubenswrapper[4706]: I1206 06:21:42.757341 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="7626c09f-99f0-4799-b790-deea0ec6d052" containerName="registry-server" Dec 06 06:21:42 crc kubenswrapper[4706]: E1206 06:21:42.757360 4706 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="b027584d-500a-4820-9afd-c0459abeb8b4" containerName="extract-utilities" Dec 06 06:21:42 crc kubenswrapper[4706]: I1206 06:21:42.757366 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="b027584d-500a-4820-9afd-c0459abeb8b4" containerName="extract-utilities" Dec 06 06:21:42 crc kubenswrapper[4706]: E1206 06:21:42.757390 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b027584d-500a-4820-9afd-c0459abeb8b4" containerName="extract-content" Dec 06 06:21:42 crc kubenswrapper[4706]: I1206 06:21:42.757396 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="b027584d-500a-4820-9afd-c0459abeb8b4" containerName="extract-content" Dec 06 06:21:42 crc kubenswrapper[4706]: E1206 06:21:42.757415 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7626c09f-99f0-4799-b790-deea0ec6d052" containerName="extract-content" Dec 06 06:21:42 crc kubenswrapper[4706]: I1206 06:21:42.757421 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="7626c09f-99f0-4799-b790-deea0ec6d052" containerName="extract-content" Dec 06 06:21:42 crc kubenswrapper[4706]: I1206 06:21:42.758836 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="7626c09f-99f0-4799-b790-deea0ec6d052" containerName="registry-server" Dec 06 06:21:42 crc kubenswrapper[4706]: I1206 06:21:42.758893 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="b027584d-500a-4820-9afd-c0459abeb8b4" containerName="registry-server" Dec 06 06:21:42 crc kubenswrapper[4706]: I1206 06:21:42.762733 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-p6zl7" Dec 06 06:21:42 crc kubenswrapper[4706]: I1206 06:21:42.796003 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p6zl7"] Dec 06 06:21:42 crc kubenswrapper[4706]: I1206 06:21:42.883752 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qxznb\" (UniqueName: \"kubernetes.io/projected/35ac058f-99eb-4655-9a22-1fc470b2c295-kube-api-access-qxznb\") pod \"certified-operators-p6zl7\" (UID: \"35ac058f-99eb-4655-9a22-1fc470b2c295\") " pod="openshift-marketplace/certified-operators-p6zl7" Dec 06 06:21:42 crc kubenswrapper[4706]: I1206 06:21:42.884016 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35ac058f-99eb-4655-9a22-1fc470b2c295-utilities\") pod \"certified-operators-p6zl7\" (UID: \"35ac058f-99eb-4655-9a22-1fc470b2c295\") " pod="openshift-marketplace/certified-operators-p6zl7" Dec 06 06:21:42 crc kubenswrapper[4706]: I1206 06:21:42.884087 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35ac058f-99eb-4655-9a22-1fc470b2c295-catalog-content\") pod \"certified-operators-p6zl7\" (UID: \"35ac058f-99eb-4655-9a22-1fc470b2c295\") " pod="openshift-marketplace/certified-operators-p6zl7" Dec 06 06:21:42 crc kubenswrapper[4706]: I1206 06:21:42.985806 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35ac058f-99eb-4655-9a22-1fc470b2c295-utilities\") pod \"certified-operators-p6zl7\" (UID: \"35ac058f-99eb-4655-9a22-1fc470b2c295\") " pod="openshift-marketplace/certified-operators-p6zl7" Dec 06 06:21:42 crc kubenswrapper[4706]: I1206 06:21:42.985847 4706 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35ac058f-99eb-4655-9a22-1fc470b2c295-catalog-content\") pod \"certified-operators-p6zl7\" (UID: \"35ac058f-99eb-4655-9a22-1fc470b2c295\") " pod="openshift-marketplace/certified-operators-p6zl7" Dec 06 06:21:42 crc kubenswrapper[4706]: I1206 06:21:42.985942 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qxznb\" (UniqueName: \"kubernetes.io/projected/35ac058f-99eb-4655-9a22-1fc470b2c295-kube-api-access-qxznb\") pod \"certified-operators-p6zl7\" (UID: \"35ac058f-99eb-4655-9a22-1fc470b2c295\") " pod="openshift-marketplace/certified-operators-p6zl7" Dec 06 06:21:42 crc kubenswrapper[4706]: I1206 06:21:42.986503 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35ac058f-99eb-4655-9a22-1fc470b2c295-utilities\") pod \"certified-operators-p6zl7\" (UID: \"35ac058f-99eb-4655-9a22-1fc470b2c295\") " pod="openshift-marketplace/certified-operators-p6zl7" Dec 06 06:21:42 crc kubenswrapper[4706]: I1206 06:21:42.986522 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35ac058f-99eb-4655-9a22-1fc470b2c295-catalog-content\") pod \"certified-operators-p6zl7\" (UID: \"35ac058f-99eb-4655-9a22-1fc470b2c295\") " pod="openshift-marketplace/certified-operators-p6zl7" Dec 06 06:21:43 crc kubenswrapper[4706]: I1206 06:21:43.006119 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qxznb\" (UniqueName: \"kubernetes.io/projected/35ac058f-99eb-4655-9a22-1fc470b2c295-kube-api-access-qxznb\") pod \"certified-operators-p6zl7\" (UID: \"35ac058f-99eb-4655-9a22-1fc470b2c295\") " pod="openshift-marketplace/certified-operators-p6zl7" Dec 06 06:21:43 crc kubenswrapper[4706]: I1206 06:21:43.156282 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-p6zl7" Dec 06 06:21:43 crc kubenswrapper[4706]: I1206 06:21:43.645672 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-p6zl7"] Dec 06 06:21:43 crc kubenswrapper[4706]: I1206 06:21:43.902068 4706 generic.go:334] "Generic (PLEG): container finished" podID="35ac058f-99eb-4655-9a22-1fc470b2c295" containerID="4adfbea1d13781dfa4b78ffb7347caf497c7b710b35bc0a961f6e4f140d4d906" exitCode=0 Dec 06 06:21:43 crc kubenswrapper[4706]: I1206 06:21:43.902121 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p6zl7" event={"ID":"35ac058f-99eb-4655-9a22-1fc470b2c295","Type":"ContainerDied","Data":"4adfbea1d13781dfa4b78ffb7347caf497c7b710b35bc0a961f6e4f140d4d906"} Dec 06 06:21:43 crc kubenswrapper[4706]: I1206 06:21:43.902149 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p6zl7" event={"ID":"35ac058f-99eb-4655-9a22-1fc470b2c295","Type":"ContainerStarted","Data":"9477e1c885faaebcdd1569df3e1b204dc1f7831c8ddc50b6510c77d753d319a0"} Dec 06 06:21:44 crc kubenswrapper[4706]: I1206 06:21:44.912237 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p6zl7" event={"ID":"35ac058f-99eb-4655-9a22-1fc470b2c295","Type":"ContainerStarted","Data":"4bb214cd36146c8e874dea89ae75ae1e6aae9738458c1072fc85a3dd3767e304"} Dec 06 06:21:45 crc kubenswrapper[4706]: I1206 06:21:45.359099 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mwlhv"] Dec 06 06:21:45 crc kubenswrapper[4706]: I1206 06:21:45.361342 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mwlhv" Dec 06 06:21:45 crc kubenswrapper[4706]: I1206 06:21:45.372793 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mwlhv"] Dec 06 06:21:45 crc kubenswrapper[4706]: I1206 06:21:45.432040 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c55d489f-98e0-4e7a-a63b-250475c8e7e9-catalog-content\") pod \"redhat-operators-mwlhv\" (UID: \"c55d489f-98e0-4e7a-a63b-250475c8e7e9\") " pod="openshift-marketplace/redhat-operators-mwlhv" Dec 06 06:21:45 crc kubenswrapper[4706]: I1206 06:21:45.432144 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c55d489f-98e0-4e7a-a63b-250475c8e7e9-utilities\") pod \"redhat-operators-mwlhv\" (UID: \"c55d489f-98e0-4e7a-a63b-250475c8e7e9\") " pod="openshift-marketplace/redhat-operators-mwlhv" Dec 06 06:21:45 crc kubenswrapper[4706]: I1206 06:21:45.432494 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjlb2\" (UniqueName: \"kubernetes.io/projected/c55d489f-98e0-4e7a-a63b-250475c8e7e9-kube-api-access-zjlb2\") pod \"redhat-operators-mwlhv\" (UID: \"c55d489f-98e0-4e7a-a63b-250475c8e7e9\") " pod="openshift-marketplace/redhat-operators-mwlhv" Dec 06 06:21:45 crc kubenswrapper[4706]: I1206 06:21:45.534117 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c55d489f-98e0-4e7a-a63b-250475c8e7e9-catalog-content\") pod \"redhat-operators-mwlhv\" (UID: 
\"c55d489f-98e0-4e7a-a63b-250475c8e7e9\") " pod="openshift-marketplace/redhat-operators-mwlhv" Dec 06 06:21:45 crc kubenswrapper[4706]: I1206 06:21:45.534204 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c55d489f-98e0-4e7a-a63b-250475c8e7e9-utilities\") pod \"redhat-operators-mwlhv\" (UID: \"c55d489f-98e0-4e7a-a63b-250475c8e7e9\") " pod="openshift-marketplace/redhat-operators-mwlhv" Dec 06 06:21:45 crc kubenswrapper[4706]: I1206 06:21:45.534338 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjlb2\" (UniqueName: \"kubernetes.io/projected/c55d489f-98e0-4e7a-a63b-250475c8e7e9-kube-api-access-zjlb2\") pod \"redhat-operators-mwlhv\" (UID: \"c55d489f-98e0-4e7a-a63b-250475c8e7e9\") " pod="openshift-marketplace/redhat-operators-mwlhv" Dec 06 06:21:45 crc kubenswrapper[4706]: I1206 06:21:45.535749 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c55d489f-98e0-4e7a-a63b-250475c8e7e9-catalog-content\") pod \"redhat-operators-mwlhv\" (UID: \"c55d489f-98e0-4e7a-a63b-250475c8e7e9\") " pod="openshift-marketplace/redhat-operators-mwlhv" Dec 06 06:21:45 crc kubenswrapper[4706]: I1206 06:21:45.535887 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c55d489f-98e0-4e7a-a63b-250475c8e7e9-utilities\") pod \"redhat-operators-mwlhv\" (UID: \"c55d489f-98e0-4e7a-a63b-250475c8e7e9\") " pod="openshift-marketplace/redhat-operators-mwlhv" Dec 06 06:21:45 crc kubenswrapper[4706]: I1206 06:21:45.554454 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjlb2\" (UniqueName: \"kubernetes.io/projected/c55d489f-98e0-4e7a-a63b-250475c8e7e9-kube-api-access-zjlb2\") pod \"redhat-operators-mwlhv\" (UID: \"c55d489f-98e0-4e7a-a63b-250475c8e7e9\") " pod="openshift-marketplace/redhat-operators-mwlhv" Dec 06 06:21:45 crc kubenswrapper[4706]: I1206 06:21:45.687754 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mwlhv" Dec 06 06:21:45 crc kubenswrapper[4706]: I1206 06:21:45.923332 4706 generic.go:334] "Generic (PLEG): container finished" podID="35ac058f-99eb-4655-9a22-1fc470b2c295" containerID="4bb214cd36146c8e874dea89ae75ae1e6aae9738458c1072fc85a3dd3767e304" exitCode=0 Dec 06 06:21:45 crc kubenswrapper[4706]: I1206 06:21:45.923386 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p6zl7" event={"ID":"35ac058f-99eb-4655-9a22-1fc470b2c295","Type":"ContainerDied","Data":"4bb214cd36146c8e874dea89ae75ae1e6aae9738458c1072fc85a3dd3767e304"} Dec 06 06:21:46 crc kubenswrapper[4706]: I1206 06:21:46.176221 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mwlhv"] Dec 06 06:21:46 crc kubenswrapper[4706]: I1206 06:21:46.947523 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p6zl7" event={"ID":"35ac058f-99eb-4655-9a22-1fc470b2c295","Type":"ContainerStarted","Data":"3ce09e1c4852da99eb60a90e19f56370079d252382ce06d0e04c08b254c455b4"} Dec 06 06:21:46 crc kubenswrapper[4706]: I1206 06:21:46.951703 4706 generic.go:334] "Generic (PLEG): container finished" podID="c55d489f-98e0-4e7a-a63b-250475c8e7e9" containerID="74218abb51d014ebd2613ec7f9eaf6ff9b1f25b2de3ef0a10c0dbc5708a105ca" exitCode=0 Dec 06 06:21:46 crc kubenswrapper[4706]: I1206 06:21:46.951767 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mwlhv" event={"ID":"c55d489f-98e0-4e7a-a63b-250475c8e7e9","Type":"ContainerDied","Data":"74218abb51d014ebd2613ec7f9eaf6ff9b1f25b2de3ef0a10c0dbc5708a105ca"} Dec 06 06:21:46 crc kubenswrapper[4706]: I1206 06:21:46.951819 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mwlhv" event={"ID":"c55d489f-98e0-4e7a-a63b-250475c8e7e9","Type":"ContainerStarted","Data":"ccc7262e44d1603ea23241aa992d0201efd287aed18da0ee901d8bee054cabe9"} Dec 06 06:21:46 crc kubenswrapper[4706]: I1206 06:21:46.979846 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-p6zl7" podStartSLOduration=2.597229431 podStartE2EDuration="4.979826208s" podCreationTimestamp="2025-12-06 06:21:42 +0000 UTC" firstStartedPulling="2025-12-06 06:21:43.904280671 +0000 UTC m=+3726.232104615" lastFinishedPulling="2025-12-06 06:21:46.286877448 +0000 UTC m=+3728.614701392" observedRunningTime="2025-12-06 06:21:46.9791586 +0000 UTC m=+3729.306982624" watchObservedRunningTime="2025-12-06 06:21:46.979826208 +0000 UTC m=+3729.307650152" Dec 06 06:21:48 crc kubenswrapper[4706]: I1206 06:21:48.973741 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mwlhv" event={"ID":"c55d489f-98e0-4e7a-a63b-250475c8e7e9","Type":"ContainerStarted","Data":"5e26fe4724d1eb4af049746725d764523fa849c050d2bcc6c73880ea440654d8"} Dec 06 06:21:49 crc kubenswrapper[4706]: I1206 06:21:49.986066 4706 generic.go:334] "Generic (PLEG): container finished" podID="c55d489f-98e0-4e7a-a63b-250475c8e7e9" containerID="5e26fe4724d1eb4af049746725d764523fa849c050d2bcc6c73880ea440654d8" exitCode=0 Dec 06 06:21:49 crc kubenswrapper[4706]: I1206 06:21:49.986173 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mwlhv" 
event={"ID":"c55d489f-98e0-4e7a-a63b-250475c8e7e9","Type":"ContainerDied","Data":"5e26fe4724d1eb4af049746725d764523fa849c050d2bcc6c73880ea440654d8"} Dec 06 06:21:52 crc kubenswrapper[4706]: I1206 06:21:52.007875 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mwlhv" event={"ID":"c55d489f-98e0-4e7a-a63b-250475c8e7e9","Type":"ContainerStarted","Data":"4cb66b9dc6a74e3d4642376bede6c590e1a9a5262982c28882b20a17ef437670"} Dec 06 06:21:52 crc kubenswrapper[4706]: I1206 06:21:52.030349 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mwlhv" podStartSLOduration=2.283875514 podStartE2EDuration="7.030329s" podCreationTimestamp="2025-12-06 06:21:45 +0000 UTC" firstStartedPulling="2025-12-06 06:21:46.95479085 +0000 UTC m=+3729.282614794" lastFinishedPulling="2025-12-06 06:21:51.701244326 +0000 UTC m=+3734.029068280" observedRunningTime="2025-12-06 06:21:52.027436852 +0000 UTC m=+3734.355260816" watchObservedRunningTime="2025-12-06 06:21:52.030329 +0000 UTC m=+3734.358152954" Dec 06 06:21:53 crc kubenswrapper[4706]: I1206 06:21:53.156687 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-p6zl7" Dec 06 06:21:53 crc kubenswrapper[4706]: I1206 06:21:53.156743 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-p6zl7" Dec 06 06:21:53 crc kubenswrapper[4706]: I1206 06:21:53.203251 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-p6zl7" Dec 06 06:21:54 crc kubenswrapper[4706]: I1206 06:21:54.077598 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-p6zl7" Dec 06 06:21:54 crc kubenswrapper[4706]: I1206 06:21:54.346133 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-p6zl7"] Dec 06 06:21:55 crc kubenswrapper[4706]: I1206 06:21:55.689701 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mwlhv" Dec 06 06:21:55 crc kubenswrapper[4706]: I1206 06:21:55.690863 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mwlhv" Dec 06 06:21:56 crc kubenswrapper[4706]: I1206 06:21:56.060016 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-p6zl7" podUID="35ac058f-99eb-4655-9a22-1fc470b2c295" containerName="registry-server" containerID="cri-o://3ce09e1c4852da99eb60a90e19f56370079d252382ce06d0e04c08b254c455b4" gracePeriod=2 Dec 06 06:21:56 crc kubenswrapper[4706]: I1206 06:21:56.599425 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-p6zl7" Dec 06 06:21:56 crc kubenswrapper[4706]: I1206 06:21:56.737983 4706 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mwlhv" podUID="c55d489f-98e0-4e7a-a63b-250475c8e7e9" containerName="registry-server" probeResult="failure" output=< Dec 06 06:21:56 crc kubenswrapper[4706]: timeout: failed to connect service ":50051" within 1s Dec 06 06:21:56 crc kubenswrapper[4706]: > Dec 06 06:21:56 crc kubenswrapper[4706]: I1206 06:21:56.773589 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qxznb\" (UniqueName: \"kubernetes.io/projected/35ac058f-99eb-4655-9a22-1fc470b2c295-kube-api-access-qxznb\") pod \"35ac058f-99eb-4655-9a22-1fc470b2c295\" (UID: \"35ac058f-99eb-4655-9a22-1fc470b2c295\") " Dec 06 06:21:56 crc kubenswrapper[4706]: I1206 06:21:56.773762 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35ac058f-99eb-4655-9a22-1fc470b2c295-utilities\") pod \"35ac058f-99eb-4655-9a22-1fc470b2c295\" (UID: \"35ac058f-99eb-4655-9a22-1fc470b2c295\") " Dec 06 06:21:56 crc kubenswrapper[4706]: I1206 06:21:56.773848 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35ac058f-99eb-4655-9a22-1fc470b2c295-catalog-content\") pod \"35ac058f-99eb-4655-9a22-1fc470b2c295\" (UID: \"35ac058f-99eb-4655-9a22-1fc470b2c295\") " Dec 06 06:21:56 crc kubenswrapper[4706]: I1206 06:21:56.774439 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35ac058f-99eb-4655-9a22-1fc470b2c295-utilities" (OuterVolumeSpecName: "utilities") pod "35ac058f-99eb-4655-9a22-1fc470b2c295" (UID: "35ac058f-99eb-4655-9a22-1fc470b2c295"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:21:56 crc kubenswrapper[4706]: I1206 06:21:56.782785 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35ac058f-99eb-4655-9a22-1fc470b2c295-kube-api-access-qxznb" (OuterVolumeSpecName: "kube-api-access-qxznb") pod "35ac058f-99eb-4655-9a22-1fc470b2c295" (UID: "35ac058f-99eb-4655-9a22-1fc470b2c295"). InnerVolumeSpecName "kube-api-access-qxznb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:21:56 crc kubenswrapper[4706]: I1206 06:21:56.821472 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35ac058f-99eb-4655-9a22-1fc470b2c295-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "35ac058f-99eb-4655-9a22-1fc470b2c295" (UID: "35ac058f-99eb-4655-9a22-1fc470b2c295"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:21:56 crc kubenswrapper[4706]: I1206 06:21:56.875999 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/35ac058f-99eb-4655-9a22-1fc470b2c295-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 06:21:56 crc kubenswrapper[4706]: I1206 06:21:56.876040 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qxznb\" (UniqueName: \"kubernetes.io/projected/35ac058f-99eb-4655-9a22-1fc470b2c295-kube-api-access-qxznb\") on node \"crc\" DevicePath \"\"" Dec 06 06:21:56 crc kubenswrapper[4706]: I1206 06:21:56.876068 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/35ac058f-99eb-4655-9a22-1fc470b2c295-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 06:21:57 crc kubenswrapper[4706]: I1206 06:21:57.074351 4706 generic.go:334] "Generic (PLEG): container finished" podID="35ac058f-99eb-4655-9a22-1fc470b2c295" containerID="3ce09e1c4852da99eb60a90e19f56370079d252382ce06d0e04c08b254c455b4" exitCode=0 Dec 06 06:21:57 crc kubenswrapper[4706]: I1206 06:21:57.074401 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p6zl7" event={"ID":"35ac058f-99eb-4655-9a22-1fc470b2c295","Type":"ContainerDied","Data":"3ce09e1c4852da99eb60a90e19f56370079d252382ce06d0e04c08b254c455b4"} Dec 06 06:21:57 crc kubenswrapper[4706]: I1206 06:21:57.074432 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-p6zl7" event={"ID":"35ac058f-99eb-4655-9a22-1fc470b2c295","Type":"ContainerDied","Data":"9477e1c885faaebcdd1569df3e1b204dc1f7831c8ddc50b6510c77d753d319a0"} Dec 06 06:21:57 crc kubenswrapper[4706]: I1206 06:21:57.074431 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-p6zl7" Dec 06 06:21:57 crc kubenswrapper[4706]: I1206 06:21:57.074453 4706 scope.go:117] "RemoveContainer" containerID="3ce09e1c4852da99eb60a90e19f56370079d252382ce06d0e04c08b254c455b4" Dec 06 06:21:57 crc kubenswrapper[4706]: I1206 06:21:57.105665 4706 scope.go:117] "RemoveContainer" containerID="4bb214cd36146c8e874dea89ae75ae1e6aae9738458c1072fc85a3dd3767e304" Dec 06 06:21:57 crc kubenswrapper[4706]: I1206 06:21:57.126393 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-p6zl7"] Dec 06 06:21:57 crc kubenswrapper[4706]: I1206 06:21:57.133259 4706 scope.go:117] "RemoveContainer" containerID="4adfbea1d13781dfa4b78ffb7347caf497c7b710b35bc0a961f6e4f140d4d906" Dec 06 06:21:57 crc kubenswrapper[4706]: I1206 06:21:57.139261 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-p6zl7"] Dec 06 06:21:57 crc kubenswrapper[4706]: I1206 06:21:57.188636 4706 scope.go:117] "RemoveContainer" containerID="3ce09e1c4852da99eb60a90e19f56370079d252382ce06d0e04c08b254c455b4" Dec 06 06:21:57 crc kubenswrapper[4706]: E1206 06:21:57.193692 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ce09e1c4852da99eb60a90e19f56370079d252382ce06d0e04c08b254c455b4\": container with ID starting with 3ce09e1c4852da99eb60a90e19f56370079d252382ce06d0e04c08b254c455b4 not found: ID does not exist" containerID="3ce09e1c4852da99eb60a90e19f56370079d252382ce06d0e04c08b254c455b4" Dec 06 06:21:57 crc kubenswrapper[4706]: I1206 06:21:57.193739 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ce09e1c4852da99eb60a90e19f56370079d252382ce06d0e04c08b254c455b4"} err="failed to get container status \"3ce09e1c4852da99eb60a90e19f56370079d252382ce06d0e04c08b254c455b4\": rpc error: code = NotFound desc = could not find container \"3ce09e1c4852da99eb60a90e19f56370079d252382ce06d0e04c08b254c455b4\": container with ID starting with 3ce09e1c4852da99eb60a90e19f56370079d252382ce06d0e04c08b254c455b4 not found: ID does not exist" Dec 06 06:21:57 crc kubenswrapper[4706]: I1206 06:21:57.193770 4706 scope.go:117] "RemoveContainer" containerID="4bb214cd36146c8e874dea89ae75ae1e6aae9738458c1072fc85a3dd3767e304" Dec 06 06:21:57 crc kubenswrapper[4706]: E1206 06:21:57.194802 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bb214cd36146c8e874dea89ae75ae1e6aae9738458c1072fc85a3dd3767e304\": container with ID starting with 4bb214cd36146c8e874dea89ae75ae1e6aae9738458c1072fc85a3dd3767e304 not found: ID does not exist" containerID="4bb214cd36146c8e874dea89ae75ae1e6aae9738458c1072fc85a3dd3767e304" Dec 06 06:21:57 crc kubenswrapper[4706]: I1206 06:21:57.194835 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bb214cd36146c8e874dea89ae75ae1e6aae9738458c1072fc85a3dd3767e304"} err="failed to get container status \"4bb214cd36146c8e874dea89ae75ae1e6aae9738458c1072fc85a3dd3767e304\": rpc error: code = NotFound desc = could not find container \"4bb214cd36146c8e874dea89ae75ae1e6aae9738458c1072fc85a3dd3767e304\": container with ID starting with 4bb214cd36146c8e874dea89ae75ae1e6aae9738458c1072fc85a3dd3767e304 not found: ID does not exist" Dec 06 06:21:57 crc kubenswrapper[4706]: I1206 06:21:57.194854 4706 scope.go:117] "RemoveContainer" 
containerID="4adfbea1d13781dfa4b78ffb7347caf497c7b710b35bc0a961f6e4f140d4d906" Dec 06 06:21:57 crc kubenswrapper[4706]: E1206 06:21:57.195153 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4adfbea1d13781dfa4b78ffb7347caf497c7b710b35bc0a961f6e4f140d4d906\": container with ID starting with 4adfbea1d13781dfa4b78ffb7347caf497c7b710b35bc0a961f6e4f140d4d906 not found: ID does not exist" containerID="4adfbea1d13781dfa4b78ffb7347caf497c7b710b35bc0a961f6e4f140d4d906" Dec 06 06:21:57 crc kubenswrapper[4706]: I1206 06:21:57.195200 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4adfbea1d13781dfa4b78ffb7347caf497c7b710b35bc0a961f6e4f140d4d906"} err="failed to get container status \"4adfbea1d13781dfa4b78ffb7347caf497c7b710b35bc0a961f6e4f140d4d906\": rpc error: code = NotFound desc = could not find container \"4adfbea1d13781dfa4b78ffb7347caf497c7b710b35bc0a961f6e4f140d4d906\": container with ID starting with 4adfbea1d13781dfa4b78ffb7347caf497c7b710b35bc0a961f6e4f140d4d906 not found: ID does not exist" Dec 06 06:21:58 crc kubenswrapper[4706]: I1206 06:21:58.048550 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35ac058f-99eb-4655-9a22-1fc470b2c295" path="/var/lib/kubelet/pods/35ac058f-99eb-4655-9a22-1fc470b2c295/volumes" Dec 06 06:22:05 crc kubenswrapper[4706]: I1206 06:22:05.750301 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mwlhv" Dec 06 06:22:05 crc kubenswrapper[4706]: I1206 06:22:05.801561 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mwlhv" Dec 06 06:22:05 crc kubenswrapper[4706]: I1206 06:22:05.990909 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mwlhv"] Dec 06 06:22:07 crc kubenswrapper[4706]: I1206 06:22:07.191600 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mwlhv" podUID="c55d489f-98e0-4e7a-a63b-250475c8e7e9" containerName="registry-server" containerID="cri-o://4cb66b9dc6a74e3d4642376bede6c590e1a9a5262982c28882b20a17ef437670" gracePeriod=2 Dec 06 06:22:07 crc kubenswrapper[4706]: I1206 06:22:07.673928 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-mwlhv"
Dec 06 06:22:07 crc kubenswrapper[4706]: I1206 06:22:07.796235 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c55d489f-98e0-4e7a-a63b-250475c8e7e9-utilities\") pod \"c55d489f-98e0-4e7a-a63b-250475c8e7e9\" (UID: \"c55d489f-98e0-4e7a-a63b-250475c8e7e9\") "
Dec 06 06:22:07 crc kubenswrapper[4706]: I1206 06:22:07.796312 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjlb2\" (UniqueName: \"kubernetes.io/projected/c55d489f-98e0-4e7a-a63b-250475c8e7e9-kube-api-access-zjlb2\") pod \"c55d489f-98e0-4e7a-a63b-250475c8e7e9\" (UID: \"c55d489f-98e0-4e7a-a63b-250475c8e7e9\") "
Dec 06 06:22:07 crc kubenswrapper[4706]: I1206 06:22:07.796381 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c55d489f-98e0-4e7a-a63b-250475c8e7e9-catalog-content\") pod \"c55d489f-98e0-4e7a-a63b-250475c8e7e9\" (UID: \"c55d489f-98e0-4e7a-a63b-250475c8e7e9\") "
Dec 06 06:22:07 crc kubenswrapper[4706]: I1206 06:22:07.796958 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c55d489f-98e0-4e7a-a63b-250475c8e7e9-utilities" (OuterVolumeSpecName: "utilities") pod "c55d489f-98e0-4e7a-a63b-250475c8e7e9" (UID: "c55d489f-98e0-4e7a-a63b-250475c8e7e9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 06 06:22:07 crc kubenswrapper[4706]: I1206 06:22:07.805278 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c55d489f-98e0-4e7a-a63b-250475c8e7e9-kube-api-access-zjlb2" (OuterVolumeSpecName: "kube-api-access-zjlb2") pod "c55d489f-98e0-4e7a-a63b-250475c8e7e9" (UID: "c55d489f-98e0-4e7a-a63b-250475c8e7e9"). InnerVolumeSpecName "kube-api-access-zjlb2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 06 06:22:07 crc kubenswrapper[4706]: I1206 06:22:07.898953 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c55d489f-98e0-4e7a-a63b-250475c8e7e9-utilities\") on node \"crc\" DevicePath \"\""
Dec 06 06:22:07 crc kubenswrapper[4706]: I1206 06:22:07.898985 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjlb2\" (UniqueName: \"kubernetes.io/projected/c55d489f-98e0-4e7a-a63b-250475c8e7e9-kube-api-access-zjlb2\") on node \"crc\" DevicePath \"\""
Dec 06 06:22:07 crc kubenswrapper[4706]: I1206 06:22:07.921661 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c55d489f-98e0-4e7a-a63b-250475c8e7e9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c55d489f-98e0-4e7a-a63b-250475c8e7e9" (UID: "c55d489f-98e0-4e7a-a63b-250475c8e7e9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 06 06:22:08 crc kubenswrapper[4706]: I1206 06:22:08.000594 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c55d489f-98e0-4e7a-a63b-250475c8e7e9-catalog-content\") on node \"crc\" DevicePath \"\""
Dec 06 06:22:08 crc kubenswrapper[4706]: I1206 06:22:08.203816 4706 generic.go:334] "Generic (PLEG): container finished" podID="c55d489f-98e0-4e7a-a63b-250475c8e7e9" containerID="4cb66b9dc6a74e3d4642376bede6c590e1a9a5262982c28882b20a17ef437670" exitCode=0
Dec 06 06:22:08 crc kubenswrapper[4706]: I1206 06:22:08.203882 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mwlhv" event={"ID":"c55d489f-98e0-4e7a-a63b-250475c8e7e9","Type":"ContainerDied","Data":"4cb66b9dc6a74e3d4642376bede6c590e1a9a5262982c28882b20a17ef437670"}
Dec 06 06:22:08 crc kubenswrapper[4706]: I1206 06:22:08.205238 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mwlhv" event={"ID":"c55d489f-98e0-4e7a-a63b-250475c8e7e9","Type":"ContainerDied","Data":"ccc7262e44d1603ea23241aa992d0201efd287aed18da0ee901d8bee054cabe9"}
Dec 06 06:22:08 crc kubenswrapper[4706]: I1206 06:22:08.203891 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mwlhv"
Dec 06 06:22:08 crc kubenswrapper[4706]: I1206 06:22:08.205265 4706 scope.go:117] "RemoveContainer" containerID="4cb66b9dc6a74e3d4642376bede6c590e1a9a5262982c28882b20a17ef437670"
Dec 06 06:22:08 crc kubenswrapper[4706]: I1206 06:22:08.240958 4706 scope.go:117] "RemoveContainer" containerID="5e26fe4724d1eb4af049746725d764523fa849c050d2bcc6c73880ea440654d8"
Dec 06 06:22:08 crc kubenswrapper[4706]: I1206 06:22:08.246858 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mwlhv"]
Dec 06 06:22:08 crc kubenswrapper[4706]: I1206 06:22:08.257193 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mwlhv"]
Dec 06 06:22:08 crc kubenswrapper[4706]: I1206 06:22:08.267967 4706 scope.go:117] "RemoveContainer" containerID="74218abb51d014ebd2613ec7f9eaf6ff9b1f25b2de3ef0a10c0dbc5708a105ca"
Dec 06 06:22:08 crc kubenswrapper[4706]: I1206 06:22:08.329030 4706 scope.go:117] "RemoveContainer" containerID="4cb66b9dc6a74e3d4642376bede6c590e1a9a5262982c28882b20a17ef437670"
Dec 06 06:22:08 crc kubenswrapper[4706]: E1206 06:22:08.329524 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4cb66b9dc6a74e3d4642376bede6c590e1a9a5262982c28882b20a17ef437670\": container with ID starting with 4cb66b9dc6a74e3d4642376bede6c590e1a9a5262982c28882b20a17ef437670 not found: ID does not exist" containerID="4cb66b9dc6a74e3d4642376bede6c590e1a9a5262982c28882b20a17ef437670"
Dec 06 06:22:08 crc kubenswrapper[4706]: I1206 06:22:08.329588 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4cb66b9dc6a74e3d4642376bede6c590e1a9a5262982c28882b20a17ef437670"} err="failed to get container status \"4cb66b9dc6a74e3d4642376bede6c590e1a9a5262982c28882b20a17ef437670\": rpc error: code = NotFound desc = could not find container \"4cb66b9dc6a74e3d4642376bede6c590e1a9a5262982c28882b20a17ef437670\": container with ID starting with 4cb66b9dc6a74e3d4642376bede6c590e1a9a5262982c28882b20a17ef437670 not found: ID does not exist"
Dec 06 06:22:08 crc kubenswrapper[4706]: I1206 06:22:08.329633 4706 scope.go:117] "RemoveContainer" containerID="5e26fe4724d1eb4af049746725d764523fa849c050d2bcc6c73880ea440654d8"
Dec 06 06:22:08 crc kubenswrapper[4706]: E1206 06:22:08.329917 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e26fe4724d1eb4af049746725d764523fa849c050d2bcc6c73880ea440654d8\": container with ID starting with 5e26fe4724d1eb4af049746725d764523fa849c050d2bcc6c73880ea440654d8 not found: ID does not exist" containerID="5e26fe4724d1eb4af049746725d764523fa849c050d2bcc6c73880ea440654d8"
Dec 06 06:22:08 crc kubenswrapper[4706]: I1206 06:22:08.329956 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e26fe4724d1eb4af049746725d764523fa849c050d2bcc6c73880ea440654d8"} err="failed to get container status \"5e26fe4724d1eb4af049746725d764523fa849c050d2bcc6c73880ea440654d8\": rpc error: code = NotFound desc = could not find container \"5e26fe4724d1eb4af049746725d764523fa849c050d2bcc6c73880ea440654d8\": container with ID starting with 5e26fe4724d1eb4af049746725d764523fa849c050d2bcc6c73880ea440654d8 not found: ID does not exist"
Dec 06 06:22:08 crc kubenswrapper[4706]: I1206 06:22:08.329980 4706 scope.go:117] "RemoveContainer" containerID="74218abb51d014ebd2613ec7f9eaf6ff9b1f25b2de3ef0a10c0dbc5708a105ca"
Dec 06 06:22:08 crc kubenswrapper[4706]: E1206 06:22:08.330181 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74218abb51d014ebd2613ec7f9eaf6ff9b1f25b2de3ef0a10c0dbc5708a105ca\": container with ID starting with 74218abb51d014ebd2613ec7f9eaf6ff9b1f25b2de3ef0a10c0dbc5708a105ca not found: ID does not exist" containerID="74218abb51d014ebd2613ec7f9eaf6ff9b1f25b2de3ef0a10c0dbc5708a105ca"
Dec 06 06:22:08 crc kubenswrapper[4706]: I1206 06:22:08.330202 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74218abb51d014ebd2613ec7f9eaf6ff9b1f25b2de3ef0a10c0dbc5708a105ca"} err="failed to get container status \"74218abb51d014ebd2613ec7f9eaf6ff9b1f25b2de3ef0a10c0dbc5708a105ca\": rpc error: code = NotFound desc = could not find container \"74218abb51d014ebd2613ec7f9eaf6ff9b1f25b2de3ef0a10c0dbc5708a105ca\": container with ID starting with 74218abb51d014ebd2613ec7f9eaf6ff9b1f25b2de3ef0a10c0dbc5708a105ca not found: ID does not exist"
Dec 06 06:22:10 crc kubenswrapper[4706]: I1206 06:22:10.053760 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c55d489f-98e0-4e7a-a63b-250475c8e7e9" path="/var/lib/kubelet/pods/c55d489f-98e0-4e7a-a63b-250475c8e7e9/volumes"
Dec 06 06:23:05 crc kubenswrapper[4706]: I1206 06:23:05.962018 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 06 06:23:05 crc kubenswrapper[4706]: I1206 06:23:05.962576 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 06 06:23:35 crc kubenswrapper[4706]: I1206 06:23:35.961707 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 06 06:23:35 crc kubenswrapper[4706]: I1206 06:23:35.964248 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 06 06:24:05 crc kubenswrapper[4706]: I1206 06:24:05.961118 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Dec 06 06:24:05 crc kubenswrapper[4706]: I1206 06:24:05.961713 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Dec 06 06:24:05 crc kubenswrapper[4706]: I1206 06:24:05.961770 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z27rn"
Dec 06 06:24:05 crc kubenswrapper[4706]: I1206 06:24:05.962864 4706 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"} pod="openshift-machine-config-operator/machine-config-daemon-z27rn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Dec 06 06:24:05 crc kubenswrapper[4706]: I1206 06:24:05.962947 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" containerID="cri-o://c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f" gracePeriod=600
Dec 06 06:24:06 crc kubenswrapper[4706]: E1206 06:24:06.083900 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:24:06 crc kubenswrapper[4706]: I1206 06:24:06.230521 4706 generic.go:334] "Generic (PLEG): container finished" podID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f" exitCode=0
Dec 06 06:24:06 crc kubenswrapper[4706]: I1206 06:24:06.230562 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerDied","Data":"c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"}
Dec 06 06:24:06 crc kubenswrapper[4706]: I1206 06:24:06.230592 4706 scope.go:117] "RemoveContainer" containerID="2e220d293f72c512167c6413d3e7480ac1155249a53e5125771dd174ef92c8c2"
Dec 06 06:24:06 crc kubenswrapper[4706]: I1206 06:24:06.231180 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:24:06 crc kubenswrapper[4706]: E1206 06:24:06.231404 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:24:21 crc kubenswrapper[4706]: I1206 06:24:21.036818 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:24:21 crc kubenswrapper[4706]: E1206 06:24:21.037558 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:24:33 crc kubenswrapper[4706]: I1206 06:24:33.036794 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:24:33 crc kubenswrapper[4706]: E1206 06:24:33.038311 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:24:46 crc kubenswrapper[4706]: I1206 06:24:46.036083 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:24:46 crc kubenswrapper[4706]: E1206 06:24:46.036767 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:25:01 crc kubenswrapper[4706]: I1206 06:25:01.036851 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:25:01 crc kubenswrapper[4706]: E1206 06:25:01.037643 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:25:16 crc kubenswrapper[4706]: I1206 06:25:16.036813 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:25:16 crc kubenswrapper[4706]: E1206 06:25:16.037664 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:25:27 crc kubenswrapper[4706]: I1206 06:25:27.036084 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:25:27 crc kubenswrapper[4706]: E1206 06:25:27.036842 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:25:38 crc kubenswrapper[4706]: I1206 06:25:38.043251 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:25:38 crc kubenswrapper[4706]: E1206 06:25:38.044117 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:25:49 crc kubenswrapper[4706]: I1206 06:25:49.036971 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:25:49 crc kubenswrapper[4706]: E1206 06:25:49.037860 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:26:04 crc kubenswrapper[4706]: I1206 06:26:04.036560 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:26:04 crc kubenswrapper[4706]: E1206 06:26:04.037468 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:26:18 crc kubenswrapper[4706]: I1206 06:26:18.042111 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:26:18 crc kubenswrapper[4706]: E1206 06:26:18.042850 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:26:29 crc kubenswrapper[4706]: I1206 06:26:29.036142 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:26:29 crc kubenswrapper[4706]: E1206 06:26:29.037144 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:26:42 crc kubenswrapper[4706]: I1206 06:26:42.036797 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:26:42 crc kubenswrapper[4706]: E1206 06:26:42.037740 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:26:55 crc kubenswrapper[4706]: I1206 06:26:55.036496 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:26:55 crc kubenswrapper[4706]: E1206 06:26:55.037312 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:27:08 crc kubenswrapper[4706]: I1206 06:27:08.043745 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:27:08 crc kubenswrapper[4706]: E1206 06:27:08.044656 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:27:19 crc kubenswrapper[4706]: I1206 06:27:19.037208 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:27:19 crc kubenswrapper[4706]: E1206 06:27:19.038158 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:27:30 crc kubenswrapper[4706]: I1206 06:27:30.035804 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:27:30 crc kubenswrapper[4706]: E1206 06:27:30.036709 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:27:42 crc kubenswrapper[4706]: I1206 06:27:42.053503 4706 generic.go:334] "Generic (PLEG): container finished" podID="53ac9b54-4c61-4101-96d0-c247c09c0cdd" containerID="33c54448ca700ee4eda61c381b8993e2330d7033306a5815a197c8665982555f" exitCode=0
Dec 06 06:27:42 crc kubenswrapper[4706]: I1206 06:27:42.061020 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"53ac9b54-4c61-4101-96d0-c247c09c0cdd","Type":"ContainerDied","Data":"33c54448ca700ee4eda61c381b8993e2330d7033306a5815a197c8665982555f"}
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.504488 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.603557 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/53ac9b54-4c61-4101-96d0-c247c09c0cdd-openstack-config-secret\") pod \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") "
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.603683 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/53ac9b54-4c61-4101-96d0-c247c09c0cdd-test-operator-ephemeral-workdir\") pod \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") "
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.603727 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/53ac9b54-4c61-4101-96d0-c247c09c0cdd-ca-certs\") pod \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") "
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.603790 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/53ac9b54-4c61-4101-96d0-c247c09c0cdd-test-operator-ephemeral-temporary\") pod \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") "
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.603853 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") "
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.603889 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rf82n\" (UniqueName: \"kubernetes.io/projected/53ac9b54-4c61-4101-96d0-c247c09c0cdd-kube-api-access-rf82n\") pod \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") "
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.603914 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/53ac9b54-4c61-4101-96d0-c247c09c0cdd-config-data\") pod \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") "
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.604379 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/53ac9b54-4c61-4101-96d0-c247c09c0cdd-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "53ac9b54-4c61-4101-96d0-c247c09c0cdd" (UID: "53ac9b54-4c61-4101-96d0-c247c09c0cdd"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.604479 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/53ac9b54-4c61-4101-96d0-c247c09c0cdd-openstack-config\") pod \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") "
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.604496 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53ac9b54-4c61-4101-96d0-c247c09c0cdd-ssh-key\") pod \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\" (UID: \"53ac9b54-4c61-4101-96d0-c247c09c0cdd\") "
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.605090 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/53ac9b54-4c61-4101-96d0-c247c09c0cdd-config-data" (OuterVolumeSpecName: "config-data") pod "53ac9b54-4c61-4101-96d0-c247c09c0cdd" (UID: "53ac9b54-4c61-4101-96d0-c247c09c0cdd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.605619 4706 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/53ac9b54-4c61-4101-96d0-c247c09c0cdd-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\""
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.605636 4706 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/53ac9b54-4c61-4101-96d0-c247c09c0cdd-config-data\") on node \"crc\" DevicePath \"\""
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.609307 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/53ac9b54-4c61-4101-96d0-c247c09c0cdd-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "53ac9b54-4c61-4101-96d0-c247c09c0cdd" (UID: "53ac9b54-4c61-4101-96d0-c247c09c0cdd"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.609965 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53ac9b54-4c61-4101-96d0-c247c09c0cdd-kube-api-access-rf82n" (OuterVolumeSpecName: "kube-api-access-rf82n") pod "53ac9b54-4c61-4101-96d0-c247c09c0cdd" (UID: "53ac9b54-4c61-4101-96d0-c247c09c0cdd"). InnerVolumeSpecName "kube-api-access-rf82n". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.611262 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "test-operator-logs") pod "53ac9b54-4c61-4101-96d0-c247c09c0cdd" (UID: "53ac9b54-4c61-4101-96d0-c247c09c0cdd"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.638171 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53ac9b54-4c61-4101-96d0-c247c09c0cdd-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "53ac9b54-4c61-4101-96d0-c247c09c0cdd" (UID: "53ac9b54-4c61-4101-96d0-c247c09c0cdd"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.639215 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53ac9b54-4c61-4101-96d0-c247c09c0cdd-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "53ac9b54-4c61-4101-96d0-c247c09c0cdd" (UID: "53ac9b54-4c61-4101-96d0-c247c09c0cdd"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.639984 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53ac9b54-4c61-4101-96d0-c247c09c0cdd-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "53ac9b54-4c61-4101-96d0-c247c09c0cdd" (UID: "53ac9b54-4c61-4101-96d0-c247c09c0cdd"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.659022 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/53ac9b54-4c61-4101-96d0-c247c09c0cdd-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "53ac9b54-4c61-4101-96d0-c247c09c0cdd" (UID: "53ac9b54-4c61-4101-96d0-c247c09c0cdd"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.707760 4706 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" "
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.707800 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rf82n\" (UniqueName: \"kubernetes.io/projected/53ac9b54-4c61-4101-96d0-c247c09c0cdd-kube-api-access-rf82n\") on node \"crc\" DevicePath \"\""
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.707812 4706 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/53ac9b54-4c61-4101-96d0-c247c09c0cdd-openstack-config\") on node \"crc\" DevicePath \"\""
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.707821 4706 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/53ac9b54-4c61-4101-96d0-c247c09c0cdd-ssh-key\") on node \"crc\" DevicePath \"\""
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.707829 4706 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/53ac9b54-4c61-4101-96d0-c247c09c0cdd-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.707838 4706 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/53ac9b54-4c61-4101-96d0-c247c09c0cdd-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\""
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.707848 4706 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/53ac9b54-4c61-4101-96d0-c247c09c0cdd-ca-certs\") on node \"crc\" DevicePath \"\""
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.726465 4706 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc"
Dec 06 06:27:43 crc kubenswrapper[4706]: I1206 06:27:43.809574 4706 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\""
Dec 06 06:27:44 crc kubenswrapper[4706]: I1206 06:27:44.072651 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"53ac9b54-4c61-4101-96d0-c247c09c0cdd","Type":"ContainerDied","Data":"7c1170816c2c2c1df98e36760e69b13eef40684bb3cec5394a50136ce67766ce"}
Dec 06 06:27:44 crc kubenswrapper[4706]: I1206 06:27:44.073231 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7c1170816c2c2c1df98e36760e69b13eef40684bb3cec5394a50136ce67766ce"
Dec 06 06:27:44 crc kubenswrapper[4706]: I1206 06:27:44.072691 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Dec 06 06:27:45 crc kubenswrapper[4706]: I1206 06:27:45.036094 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:27:45 crc kubenswrapper[4706]: E1206 06:27:45.036409 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:27:50 crc kubenswrapper[4706]: I1206 06:27:50.979269 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Dec 06 06:27:50 crc kubenswrapper[4706]: E1206 06:27:50.980470 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c55d489f-98e0-4e7a-a63b-250475c8e7e9" containerName="extract-content"
Dec 06 06:27:50 crc kubenswrapper[4706]: I1206 06:27:50.980486 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="c55d489f-98e0-4e7a-a63b-250475c8e7e9" containerName="extract-content"
Dec 06 06:27:50 crc kubenswrapper[4706]: E1206 06:27:50.980499 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35ac058f-99eb-4655-9a22-1fc470b2c295" containerName="extract-utilities"
Dec 06 06:27:50 crc kubenswrapper[4706]: I1206 06:27:50.980506 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="35ac058f-99eb-4655-9a22-1fc470b2c295" containerName="extract-utilities"
Dec 06 06:27:50 crc kubenswrapper[4706]: E1206 06:27:50.980522 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c55d489f-98e0-4e7a-a63b-250475c8e7e9" containerName="registry-server"
Dec 06 06:27:50 crc kubenswrapper[4706]: I1206 06:27:50.980529 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="c55d489f-98e0-4e7a-a63b-250475c8e7e9" containerName="registry-server"
Dec 06 06:27:50 crc kubenswrapper[4706]: E1206 06:27:50.980549 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35ac058f-99eb-4655-9a22-1fc470b2c295" containerName="extract-content"
Dec 06 06:27:50 crc kubenswrapper[4706]: I1206 06:27:50.980566 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="35ac058f-99eb-4655-9a22-1fc470b2c295" containerName="extract-content"
Dec 06 06:27:50 crc kubenswrapper[4706]: E1206 06:27:50.980584 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53ac9b54-4c61-4101-96d0-c247c09c0cdd" containerName="tempest-tests-tempest-tests-runner"
Dec 06 06:27:50 crc kubenswrapper[4706]: I1206 06:27:50.980590 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="53ac9b54-4c61-4101-96d0-c247c09c0cdd" containerName="tempest-tests-tempest-tests-runner"
Dec 06 06:27:50 crc kubenswrapper[4706]: E1206 06:27:50.980608 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35ac058f-99eb-4655-9a22-1fc470b2c295" containerName="registry-server"
Dec 06 06:27:50 crc kubenswrapper[4706]: I1206 06:27:50.980614 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="35ac058f-99eb-4655-9a22-1fc470b2c295" containerName="registry-server"
Dec 06 06:27:50 crc kubenswrapper[4706]: E1206 06:27:50.980632 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c55d489f-98e0-4e7a-a63b-250475c8e7e9" containerName="extract-utilities"
Dec 06 06:27:50 crc kubenswrapper[4706]: I1206 06:27:50.980639 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="c55d489f-98e0-4e7a-a63b-250475c8e7e9" containerName="extract-utilities"
Dec 06 06:27:50 crc kubenswrapper[4706]: I1206 06:27:50.980855 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="53ac9b54-4c61-4101-96d0-c247c09c0cdd" containerName="tempest-tests-tempest-tests-runner"
Dec 06 06:27:50 crc kubenswrapper[4706]: I1206 06:27:50.980869 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="c55d489f-98e0-4e7a-a63b-250475c8e7e9" containerName="registry-server"
Dec 06 06:27:50 crc kubenswrapper[4706]: I1206 06:27:50.980882 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="35ac058f-99eb-4655-9a22-1fc470b2c295" containerName="registry-server"
Dec 06 06:27:50 crc kubenswrapper[4706]: I1206 06:27:50.981574 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Dec 06 06:27:50 crc kubenswrapper[4706]: I1206 06:27:50.984269 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-54qsk"
Dec 06 06:27:50 crc kubenswrapper[4706]: I1206 06:27:50.989588 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Dec 06 06:27:51 crc kubenswrapper[4706]: I1206 06:27:51.048763 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qf5hs\" (UniqueName: \"kubernetes.io/projected/5711989a-45c2-4c7f-b728-3d5c0eb851a6-kube-api-access-qf5hs\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5711989a-45c2-4c7f-b728-3d5c0eb851a6\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Dec 06 06:27:51 crc kubenswrapper[4706]: I1206 06:27:51.049024 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5711989a-45c2-4c7f-b728-3d5c0eb851a6\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Dec 06 06:27:51 crc kubenswrapper[4706]: I1206 06:27:51.151444 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5711989a-45c2-4c7f-b728-3d5c0eb851a6\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Dec 06 06:27:51 crc kubenswrapper[4706]: I1206 06:27:51.151542 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qf5hs\" (UniqueName: \"kubernetes.io/projected/5711989a-45c2-4c7f-b728-3d5c0eb851a6-kube-api-access-qf5hs\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5711989a-45c2-4c7f-b728-3d5c0eb851a6\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Dec 06 06:27:51 crc kubenswrapper[4706]: I1206 06:27:51.152317 4706 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5711989a-45c2-4c7f-b728-3d5c0eb851a6\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Dec 06 06:27:51 crc kubenswrapper[4706]: I1206 06:27:51.176631 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qf5hs\" (UniqueName: \"kubernetes.io/projected/5711989a-45c2-4c7f-b728-3d5c0eb851a6-kube-api-access-qf5hs\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5711989a-45c2-4c7f-b728-3d5c0eb851a6\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Dec 06 06:27:51 crc kubenswrapper[4706]: I1206 06:27:51.179202 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"5711989a-45c2-4c7f-b728-3d5c0eb851a6\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Dec 06 06:27:51 crc kubenswrapper[4706]: I1206 06:27:51.298844 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Dec 06 06:27:51 crc kubenswrapper[4706]: I1206 06:27:51.759069 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Dec 06 06:27:52 crc kubenswrapper[4706]: I1206 06:27:52.098929 4706 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Dec 06 06:27:52 crc kubenswrapper[4706]: I1206 06:27:52.138096 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"5711989a-45c2-4c7f-b728-3d5c0eb851a6","Type":"ContainerStarted","Data":"5bb9931a39a289ba96f4a1f78330b418349ea9583f74bd64df9900184f0afa3a"}
Dec 06 06:27:54 crc kubenswrapper[4706]: I1206 06:27:54.158224 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"5711989a-45c2-4c7f-b728-3d5c0eb851a6","Type":"ContainerStarted","Data":"a8e217b2e59d0db73aaf8ee5950d443af5ce1adf434c5338ecebfeb270d84515"}
Dec 06 06:27:54 crc kubenswrapper[4706]: I1206 06:27:54.174640 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=3.07784008 podStartE2EDuration="4.174613318s" podCreationTimestamp="2025-12-06 06:27:50 +0000 UTC" firstStartedPulling="2025-12-06 06:27:52.098715509 +0000 UTC m=+4094.426539453" lastFinishedPulling="2025-12-06 06:27:53.195488747 +0000 UTC m=+4095.523312691" observedRunningTime="2025-12-06 06:27:54.171041831 +0000 UTC m=+4096.498865775" watchObservedRunningTime="2025-12-06 06:27:54.174613318 +0000 UTC m=+4096.502437272"
Dec 06 06:27:58 crc kubenswrapper[4706]: I1206 06:27:58.043113 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:27:58 crc kubenswrapper[4706]: E1206 06:27:58.043708 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:28:13 crc kubenswrapper[4706]: I1206 06:28:13.037032 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:28:13 crc kubenswrapper[4706]: E1206 06:28:13.038089 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:28:15 crc kubenswrapper[4706]: I1206 06:28:15.691486 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pbc2v/must-gather-8nq8m"]
Dec 06 06:28:15 crc kubenswrapper[4706]: I1206 06:28:15.694077 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pbc2v/must-gather-8nq8m"
Dec 06 06:28:15 crc kubenswrapper[4706]: I1206 06:28:15.697240 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-pbc2v"/"openshift-service-ca.crt"
Dec 06 06:28:15 crc kubenswrapper[4706]: I1206 06:28:15.697317 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-pbc2v"/"default-dockercfg-xhss6"
Dec 06 06:28:15 crc kubenswrapper[4706]: I1206 06:28:15.697443 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-pbc2v"/"kube-root-ca.crt"
Dec 06 06:28:15 crc kubenswrapper[4706]: I1206 06:28:15.701616 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-pbc2v/must-gather-8nq8m"]
Dec 06 06:28:15 crc kubenswrapper[4706]: I1206 06:28:15.713623 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ad589568-2d22-488b-a9c0-bb9b091f28a6-must-gather-output\") pod \"must-gather-8nq8m\" (UID: \"ad589568-2d22-488b-a9c0-bb9b091f28a6\") " pod="openshift-must-gather-pbc2v/must-gather-8nq8m"
Dec 06 06:28:15 crc kubenswrapper[4706]: I1206 06:28:15.713748 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxhjl\" (UniqueName: \"kubernetes.io/projected/ad589568-2d22-488b-a9c0-bb9b091f28a6-kube-api-access-rxhjl\") pod \"must-gather-8nq8m\" (UID: \"ad589568-2d22-488b-a9c0-bb9b091f28a6\") " pod="openshift-must-gather-pbc2v/must-gather-8nq8m"
Dec 06 06:28:15 crc kubenswrapper[4706]: I1206 06:28:15.815775 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxhjl\" (UniqueName: \"kubernetes.io/projected/ad589568-2d22-488b-a9c0-bb9b091f28a6-kube-api-access-rxhjl\") pod \"must-gather-8nq8m\" (UID: \"ad589568-2d22-488b-a9c0-bb9b091f28a6\") " pod="openshift-must-gather-pbc2v/must-gather-8nq8m"
Dec 06 06:28:15 crc kubenswrapper[4706]: I1206 06:28:15.815937 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ad589568-2d22-488b-a9c0-bb9b091f28a6-must-gather-output\") pod \"must-gather-8nq8m\" (UID: \"ad589568-2d22-488b-a9c0-bb9b091f28a6\") " pod="openshift-must-gather-pbc2v/must-gather-8nq8m"
Dec 06 06:28:15 crc kubenswrapper[4706]: I1206 06:28:15.816428 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ad589568-2d22-488b-a9c0-bb9b091f28a6-must-gather-output\") pod \"must-gather-8nq8m\" (UID: \"ad589568-2d22-488b-a9c0-bb9b091f28a6\") " pod="openshift-must-gather-pbc2v/must-gather-8nq8m"
Dec 06 06:28:15 crc kubenswrapper[4706]: I1206 06:28:15.852934 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxhjl\" (UniqueName: \"kubernetes.io/projected/ad589568-2d22-488b-a9c0-bb9b091f28a6-kube-api-access-rxhjl\") pod \"must-gather-8nq8m\" (UID: \"ad589568-2d22-488b-a9c0-bb9b091f28a6\") " pod="openshift-must-gather-pbc2v/must-gather-8nq8m"
Dec 06 06:28:16 crc kubenswrapper[4706]: I1206 06:28:16.016804 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pbc2v/must-gather-8nq8m"
Dec 06 06:28:16 crc kubenswrapper[4706]: I1206 06:28:16.453823 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-pbc2v/must-gather-8nq8m"]
Dec 06 06:28:17 crc kubenswrapper[4706]: I1206 06:28:17.352539 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pbc2v/must-gather-8nq8m" event={"ID":"ad589568-2d22-488b-a9c0-bb9b091f28a6","Type":"ContainerStarted","Data":"305fa98e93d7917364c72945e09da5be33dec9749e7899aa76e61ca7cb321733"}
Dec 06 06:28:21 crc kubenswrapper[4706]: I1206 06:28:21.390426 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pbc2v/must-gather-8nq8m" event={"ID":"ad589568-2d22-488b-a9c0-bb9b091f28a6","Type":"ContainerStarted","Data":"f1e76b36fd35275dc35d8aa1f1aa573ce2ef31dac4594e1a3ee2d7ddc71ca7f4"}
Dec 06 06:28:21 crc kubenswrapper[4706]: I1206 06:28:21.390924 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pbc2v/must-gather-8nq8m" event={"ID":"ad589568-2d22-488b-a9c0-bb9b091f28a6","Type":"ContainerStarted","Data":"183dfc9f4435fea54de44ee16a888c04f667702e690126e2da7716bae0ac1102"}
Dec 06 06:28:21 crc kubenswrapper[4706]: I1206 06:28:21.409458 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-pbc2v/must-gather-8nq8m" podStartSLOduration=2.851882941 podStartE2EDuration="6.409441547s" podCreationTimestamp="2025-12-06 06:28:15 +0000 UTC" firstStartedPulling="2025-12-06 06:28:16.801002086 +0000 UTC m=+4119.128826030" lastFinishedPulling="2025-12-06 06:28:20.358560692 +0000 UTC m=+4122.686384636" observedRunningTime="2025-12-06 06:28:21.403821015 +0000 UTC m=+4123.731644969" watchObservedRunningTime="2025-12-06 06:28:21.409441547 +0000 UTC m=+4123.737265491"
Dec 06 06:28:24 crc kubenswrapper[4706]: I1206 06:28:24.212261 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pbc2v/crc-debug-njnz5"]
Dec 06 06:28:24 crc kubenswrapper[4706]: I1206 06:28:24.213847 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pbc2v/crc-debug-njnz5"
Dec 06 06:28:24 crc kubenswrapper[4706]: I1206 06:28:24.299529 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4rdc\" (UniqueName: \"kubernetes.io/projected/8fd0eb8a-908f-4adb-bbec-f5939cae5f0c-kube-api-access-j4rdc\") pod \"crc-debug-njnz5\" (UID: \"8fd0eb8a-908f-4adb-bbec-f5939cae5f0c\") " pod="openshift-must-gather-pbc2v/crc-debug-njnz5"
Dec 06 06:28:24 crc kubenswrapper[4706]: I1206 06:28:24.299670 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8fd0eb8a-908f-4adb-bbec-f5939cae5f0c-host\") pod \"crc-debug-njnz5\" (UID: \"8fd0eb8a-908f-4adb-bbec-f5939cae5f0c\") " pod="openshift-must-gather-pbc2v/crc-debug-njnz5"
Dec 06 06:28:24 crc kubenswrapper[4706]: I1206 06:28:24.401838 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4rdc\" (UniqueName: \"kubernetes.io/projected/8fd0eb8a-908f-4adb-bbec-f5939cae5f0c-kube-api-access-j4rdc\") pod \"crc-debug-njnz5\" (UID: \"8fd0eb8a-908f-4adb-bbec-f5939cae5f0c\") " pod="openshift-must-gather-pbc2v/crc-debug-njnz5"
Dec 06 06:28:24 crc kubenswrapper[4706]: I1206 06:28:24.401916 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8fd0eb8a-908f-4adb-bbec-f5939cae5f0c-host\") pod \"crc-debug-njnz5\" (UID: \"8fd0eb8a-908f-4adb-bbec-f5939cae5f0c\") " pod="openshift-must-gather-pbc2v/crc-debug-njnz5"
Dec 06 06:28:24 crc kubenswrapper[4706]: I1206 06:28:24.402029 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8fd0eb8a-908f-4adb-bbec-f5939cae5f0c-host\") pod \"crc-debug-njnz5\" (UID: \"8fd0eb8a-908f-4adb-bbec-f5939cae5f0c\") " pod="openshift-must-gather-pbc2v/crc-debug-njnz5"
Dec 06 06:28:24 crc kubenswrapper[4706]: I1206 06:28:24.432867 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4rdc\" (UniqueName: \"kubernetes.io/projected/8fd0eb8a-908f-4adb-bbec-f5939cae5f0c-kube-api-access-j4rdc\") pod \"crc-debug-njnz5\" (UID: \"8fd0eb8a-908f-4adb-bbec-f5939cae5f0c\") " pod="openshift-must-gather-pbc2v/crc-debug-njnz5"
Dec 06 06:28:24 crc kubenswrapper[4706]: I1206 06:28:24.534592 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pbc2v/crc-debug-njnz5"
Dec 06 06:28:24 crc kubenswrapper[4706]: W1206 06:28:24.567858 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8fd0eb8a_908f_4adb_bbec_f5939cae5f0c.slice/crio-33570c8c37928e55cdc6c6a9221e75490d223fa90945ec196d091b1d9b60515f WatchSource:0}: Error finding container 33570c8c37928e55cdc6c6a9221e75490d223fa90945ec196d091b1d9b60515f: Status 404 returned error can't find the container with id 33570c8c37928e55cdc6c6a9221e75490d223fa90945ec196d091b1d9b60515f
Dec 06 06:28:25 crc kubenswrapper[4706]: I1206 06:28:25.424524 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pbc2v/crc-debug-njnz5" event={"ID":"8fd0eb8a-908f-4adb-bbec-f5939cae5f0c","Type":"ContainerStarted","Data":"33570c8c37928e55cdc6c6a9221e75490d223fa90945ec196d091b1d9b60515f"}
Dec 06 06:28:26 crc kubenswrapper[4706]: E1206 06:28:26.048575 4706 upgradeaware.go:441] Error proxying data from backend to client: writeto tcp 38.102.83.23:36728->38.102.83.23:37957: read tcp 38.102.83.23:36728->38.102.83.23:37957: read: connection reset by peer
Dec 06 06:28:26 crc kubenswrapper[4706]: E1206 06:28:26.048758 4706 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.23:36728->38.102.83.23:37957: write tcp 38.102.83.23:36728->38.102.83.23:37957: write: broken pipe
Dec 06 06:28:27 crc kubenswrapper[4706]: I1206 06:28:27.036637 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:28:27 crc kubenswrapper[4706]: E1206 06:28:27.037582 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:28:36 crc kubenswrapper[4706]: I1206 06:28:36.521546 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pbc2v/crc-debug-njnz5" event={"ID":"8fd0eb8a-908f-4adb-bbec-f5939cae5f0c","Type":"ContainerStarted","Data":"8393625bea98b924f1b88e544162ba373f7c3ce3addcdbf2bead211eba6d84af"}
Dec 06 06:28:36 crc kubenswrapper[4706]: I1206 06:28:36.536117 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-pbc2v/crc-debug-njnz5" podStartSLOduration=1.82310367 podStartE2EDuration="12.536099481s" podCreationTimestamp="2025-12-06 06:28:24 +0000 UTC" firstStartedPulling="2025-12-06 06:28:24.573728317 +0000 UTC m=+4126.901552261" lastFinishedPulling="2025-12-06 06:28:35.286724128 +0000 UTC m=+4137.614548072" observedRunningTime="2025-12-06 06:28:36.533142162 +0000 UTC m=+4138.860966116" watchObservedRunningTime="2025-12-06 06:28:36.536099481 +0000 UTC m=+4138.863923425"
Dec 06 06:28:40 crc kubenswrapper[4706]: I1206 06:28:40.036324 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:28:40 crc kubenswrapper[4706]: E1206 06:28:40.037207 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:28:47 crc kubenswrapper[4706]: I1206 06:28:47.505163 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-9jkmb"]
Dec 06 06:28:47 crc kubenswrapper[4706]: I1206 06:28:47.511070 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9jkmb"
Dec 06 06:28:47 crc kubenswrapper[4706]: I1206 06:28:47.519948 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9jkmb"]
Dec 06 06:28:47 crc kubenswrapper[4706]: I1206 06:28:47.650897 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f-utilities\") pod \"redhat-marketplace-9jkmb\" (UID: \"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f\") " pod="openshift-marketplace/redhat-marketplace-9jkmb"
Dec 06 06:28:47 crc kubenswrapper[4706]: I1206 06:28:47.651040 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nv45d\" (UniqueName: \"kubernetes.io/projected/c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f-kube-api-access-nv45d\") pod \"redhat-marketplace-9jkmb\" (UID: \"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f\") " pod="openshift-marketplace/redhat-marketplace-9jkmb"
Dec 06 06:28:47 crc kubenswrapper[4706]: I1206 06:28:47.651112 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f-catalog-content\") pod \"redhat-marketplace-9jkmb\" (UID: \"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f\") " pod="openshift-marketplace/redhat-marketplace-9jkmb"
Dec 06 06:28:47 crc kubenswrapper[4706]: I1206 06:28:47.753220 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f-utilities\") pod \"redhat-marketplace-9jkmb\" (UID: \"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f\") " pod="openshift-marketplace/redhat-marketplace-9jkmb"
Dec 06 06:28:47 crc kubenswrapper[4706]: I1206 06:28:47.753381 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nv45d\" (UniqueName: \"kubernetes.io/projected/c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f-kube-api-access-nv45d\") pod \"redhat-marketplace-9jkmb\" (UID: \"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f\") " pod="openshift-marketplace/redhat-marketplace-9jkmb"
Dec 06 06:28:47 crc kubenswrapper[4706]: I1206 06:28:47.753438 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f-catalog-content\") pod \"redhat-marketplace-9jkmb\" (UID: \"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f\") " pod="openshift-marketplace/redhat-marketplace-9jkmb"
Dec 06 06:28:47 crc kubenswrapper[4706]: I1206 06:28:47.754022 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f-utilities\") pod \"redhat-marketplace-9jkmb\" (UID: \"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f\") " pod="openshift-marketplace/redhat-marketplace-9jkmb"
Dec 06 06:28:47 crc kubenswrapper[4706]: I1206 06:28:47.754078 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f-catalog-content\") pod \"redhat-marketplace-9jkmb\" (UID: \"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f\") " pod="openshift-marketplace/redhat-marketplace-9jkmb"
Dec 06 06:28:47 crc kubenswrapper[4706]: I1206 06:28:47.785743 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nv45d\" (UniqueName: \"kubernetes.io/projected/c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f-kube-api-access-nv45d\") pod \"redhat-marketplace-9jkmb\" (UID: \"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f\") " pod="openshift-marketplace/redhat-marketplace-9jkmb"
Dec 06 06:28:47 crc kubenswrapper[4706]: I1206 06:28:47.837900 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9jkmb"
Dec 06 06:28:51 crc kubenswrapper[4706]: I1206 06:28:51.036824 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:28:51 crc kubenswrapper[4706]: E1206 06:28:51.037749 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:28:51 crc kubenswrapper[4706]: I1206 06:28:51.823550 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9jkmb"]
Dec 06 06:28:51 crc kubenswrapper[4706]: W1206 06:28:51.832397 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc8fc2d1e_f7e8_4f91_abfb_b6593d7b8a4f.slice/crio-31760d1b2a90fd583cda6653db4a95923402fd7ba4d1e3942b96729c3b435d0c WatchSource:0}: Error finding container 31760d1b2a90fd583cda6653db4a95923402fd7ba4d1e3942b96729c3b435d0c: Status 404 returned error can't find the container with id 31760d1b2a90fd583cda6653db4a95923402fd7ba4d1e3942b96729c3b435d0c
Dec 06 06:28:52 crc kubenswrapper[4706]: I1206 06:28:52.668675 4706 generic.go:334] "Generic (PLEG): container finished" podID="c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f" containerID="50c4c225b39c5ec34a246801aedbb80acf0e9a4a1ce0662460bb303a8a94b501" exitCode=0
Dec 06 06:28:52 crc kubenswrapper[4706]: I1206 06:28:52.669195 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9jkmb" event={"ID":"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f","Type":"ContainerDied","Data":"50c4c225b39c5ec34a246801aedbb80acf0e9a4a1ce0662460bb303a8a94b501"}
Dec 06 06:28:52 crc kubenswrapper[4706]: I1206 06:28:52.669235 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9jkmb" event={"ID":"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f","Type":"ContainerStarted","Data":"31760d1b2a90fd583cda6653db4a95923402fd7ba4d1e3942b96729c3b435d0c"}
Dec 06 06:28:54 crc kubenswrapper[4706]: I1206 06:28:54.691505 4706 generic.go:334] "Generic (PLEG): container finished" podID="c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f" containerID="30ff7f7398c258700c7110de0026c1532379f05d8ec7a8dc9d088d8ee8fbe27f" exitCode=0
Dec 06 06:28:54 crc kubenswrapper[4706]: I1206 06:28:54.691593 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9jkmb" event={"ID":"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f","Type":"ContainerDied","Data":"30ff7f7398c258700c7110de0026c1532379f05d8ec7a8dc9d088d8ee8fbe27f"}
Dec 06 06:28:55 crc kubenswrapper[4706]: I1206 06:28:55.702311 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9jkmb" event={"ID":"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f","Type":"ContainerStarted","Data":"997d2e1d4a84586b6d381411b7f896e38065d5506e32d11b8d6a5714db291107"}
Dec 06 06:28:55 crc kubenswrapper[4706]: I1206 06:28:55.726218 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-9jkmb" podStartSLOduration=5.97645761 podStartE2EDuration="8.726198768s" podCreationTimestamp="2025-12-06 06:28:47 +0000 UTC" firstStartedPulling="2025-12-06 06:28:52.670843 +0000 UTC m=+4154.998666944" lastFinishedPulling="2025-12-06 06:28:55.420584158 +0000 UTC m=+4157.748408102" observedRunningTime="2025-12-06 06:28:55.719154778 +0000 UTC m=+4158.046978732" watchObservedRunningTime="2025-12-06 06:28:55.726198768 +0000 UTC m=+4158.054022702"
Dec 06 06:28:57 crc kubenswrapper[4706]: I1206 06:28:57.838335 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-9jkmb"
Dec 06 06:28:57 crc kubenswrapper[4706]: I1206 06:28:57.838723 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-9jkmb"
Dec 06 06:28:57 crc kubenswrapper[4706]: I1206 06:28:57.894355 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-9jkmb"
Dec 06 06:29:02 crc kubenswrapper[4706]: I1206 06:29:02.036733 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f"
Dec 06 06:29:02 crc kubenswrapper[4706]: E1206 06:29:02.037390 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:29:07 crc kubenswrapper[4706]: I1206 06:29:07.884222 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-9jkmb"
Dec 06 06:29:07 crc kubenswrapper[4706]: I1206 06:29:07.950139 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9jkmb"]
Dec 06 06:29:08 crc kubenswrapper[4706]: I1206 06:29:08.804548 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-9jkmb" podUID="c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f" containerName="registry-server" containerID="cri-o://997d2e1d4a84586b6d381411b7f896e38065d5506e32d11b8d6a5714db291107" gracePeriod=2
Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.407323 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9jkmb"
Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.526326 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f-utilities\") pod \"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f\" (UID: \"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f\") "
Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.526377 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f-catalog-content\") pod \"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f\" (UID: \"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f\") "
Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.526409 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nv45d\" (UniqueName: \"kubernetes.io/projected/c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f-kube-api-access-nv45d\") pod \"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f\" (UID: \"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f\") "
Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.528532 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f-utilities" (OuterVolumeSpecName: "utilities") pod "c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f" (UID: "c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.532038 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f-kube-api-access-nv45d" (OuterVolumeSpecName: "kube-api-access-nv45d") pod "c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f" (UID: "c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f"). InnerVolumeSpecName "kube-api-access-nv45d". PluginName "kubernetes.io/projected", VolumeGidValue ""
Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.560039 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f" (UID: "c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.628537 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.628946 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.628965 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nv45d\" (UniqueName: \"kubernetes.io/projected/c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f-kube-api-access-nv45d\") on node \"crc\" DevicePath \"\"" Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.815363 4706 generic.go:334] "Generic (PLEG): container finished" podID="c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f" containerID="997d2e1d4a84586b6d381411b7f896e38065d5506e32d11b8d6a5714db291107" exitCode=0 Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.815412 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9jkmb" event={"ID":"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f","Type":"ContainerDied","Data":"997d2e1d4a84586b6d381411b7f896e38065d5506e32d11b8d6a5714db291107"} Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.815442 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9jkmb" event={"ID":"c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f","Type":"ContainerDied","Data":"31760d1b2a90fd583cda6653db4a95923402fd7ba4d1e3942b96729c3b435d0c"} Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.815461 4706 scope.go:117] "RemoveContainer" containerID="997d2e1d4a84586b6d381411b7f896e38065d5506e32d11b8d6a5714db291107" Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.815601 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9jkmb" Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.843222 4706 scope.go:117] "RemoveContainer" containerID="30ff7f7398c258700c7110de0026c1532379f05d8ec7a8dc9d088d8ee8fbe27f" Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.869072 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9jkmb"] Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.874713 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-9jkmb"] Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.881394 4706 scope.go:117] "RemoveContainer" containerID="50c4c225b39c5ec34a246801aedbb80acf0e9a4a1ce0662460bb303a8a94b501" Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.924542 4706 scope.go:117] "RemoveContainer" containerID="997d2e1d4a84586b6d381411b7f896e38065d5506e32d11b8d6a5714db291107" Dec 06 06:29:09 crc kubenswrapper[4706]: E1206 06:29:09.925113 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"997d2e1d4a84586b6d381411b7f896e38065d5506e32d11b8d6a5714db291107\": container with ID starting with 997d2e1d4a84586b6d381411b7f896e38065d5506e32d11b8d6a5714db291107 not found: ID does not exist" containerID="997d2e1d4a84586b6d381411b7f896e38065d5506e32d11b8d6a5714db291107" Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.925216 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"997d2e1d4a84586b6d381411b7f896e38065d5506e32d11b8d6a5714db291107"} err="failed to get container status \"997d2e1d4a84586b6d381411b7f896e38065d5506e32d11b8d6a5714db291107\": rpc error: code = NotFound desc = could not find container \"997d2e1d4a84586b6d381411b7f896e38065d5506e32d11b8d6a5714db291107\": container with ID starting with 997d2e1d4a84586b6d381411b7f896e38065d5506e32d11b8d6a5714db291107 not found: ID does not exist" Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.925298 4706 scope.go:117] "RemoveContainer" containerID="30ff7f7398c258700c7110de0026c1532379f05d8ec7a8dc9d088d8ee8fbe27f" Dec 06 06:29:09 crc kubenswrapper[4706]: E1206 06:29:09.925669 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"30ff7f7398c258700c7110de0026c1532379f05d8ec7a8dc9d088d8ee8fbe27f\": container with ID starting with 30ff7f7398c258700c7110de0026c1532379f05d8ec7a8dc9d088d8ee8fbe27f not found: ID does not exist" containerID="30ff7f7398c258700c7110de0026c1532379f05d8ec7a8dc9d088d8ee8fbe27f" Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.925768 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"30ff7f7398c258700c7110de0026c1532379f05d8ec7a8dc9d088d8ee8fbe27f"} err="failed to get container status \"30ff7f7398c258700c7110de0026c1532379f05d8ec7a8dc9d088d8ee8fbe27f\": rpc error: code = NotFound desc = could not find container \"30ff7f7398c258700c7110de0026c1532379f05d8ec7a8dc9d088d8ee8fbe27f\": container with ID starting with 30ff7f7398c258700c7110de0026c1532379f05d8ec7a8dc9d088d8ee8fbe27f not found: ID does not exist" Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.925855 4706 scope.go:117] "RemoveContainer" containerID="50c4c225b39c5ec34a246801aedbb80acf0e9a4a1ce0662460bb303a8a94b501" Dec 06 06:29:09 crc kubenswrapper[4706]: E1206 06:29:09.926171 4706 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"50c4c225b39c5ec34a246801aedbb80acf0e9a4a1ce0662460bb303a8a94b501\": container with ID starting with 50c4c225b39c5ec34a246801aedbb80acf0e9a4a1ce0662460bb303a8a94b501 not found: ID does not exist" containerID="50c4c225b39c5ec34a246801aedbb80acf0e9a4a1ce0662460bb303a8a94b501" Dec 06 06:29:09 crc kubenswrapper[4706]: I1206 06:29:09.926264 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50c4c225b39c5ec34a246801aedbb80acf0e9a4a1ce0662460bb303a8a94b501"} err="failed to get container status \"50c4c225b39c5ec34a246801aedbb80acf0e9a4a1ce0662460bb303a8a94b501\": rpc error: code = NotFound desc = could not find container \"50c4c225b39c5ec34a246801aedbb80acf0e9a4a1ce0662460bb303a8a94b501\": container with ID starting with 50c4c225b39c5ec34a246801aedbb80acf0e9a4a1ce0662460bb303a8a94b501 not found: ID does not exist" Dec 06 06:29:10 crc kubenswrapper[4706]: I1206 06:29:10.046769 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f" path="/var/lib/kubelet/pods/c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f/volumes" Dec 06 06:29:14 crc kubenswrapper[4706]: I1206 06:29:14.036351 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f" Dec 06 06:29:14 crc kubenswrapper[4706]: I1206 06:29:14.872642 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"0712b5d6dd3cf50d5e8260c2d90bbd3bd3e7c8f2f055b870399a5e8978ffe708"} Dec 06 06:29:26 crc kubenswrapper[4706]: I1206 06:29:26.972891 4706 generic.go:334] "Generic (PLEG): container finished" podID="8fd0eb8a-908f-4adb-bbec-f5939cae5f0c" containerID="8393625bea98b924f1b88e544162ba373f7c3ce3addcdbf2bead211eba6d84af" exitCode=0 Dec 06 06:29:26 crc kubenswrapper[4706]: I1206 06:29:26.972983 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pbc2v/crc-debug-njnz5" event={"ID":"8fd0eb8a-908f-4adb-bbec-f5939cae5f0c","Type":"ContainerDied","Data":"8393625bea98b924f1b88e544162ba373f7c3ce3addcdbf2bead211eba6d84af"} Dec 06 06:29:27 crc kubenswrapper[4706]: I1206 06:29:27.182599 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4tph7"] Dec 06 06:29:27 crc kubenswrapper[4706]: E1206 06:29:27.183049 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f" containerName="extract-utilities" Dec 06 06:29:27 crc kubenswrapper[4706]: I1206 06:29:27.183070 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f" containerName="extract-utilities" Dec 06 06:29:27 crc kubenswrapper[4706]: E1206 06:29:27.183097 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f" containerName="registry-server" Dec 06 06:29:27 crc kubenswrapper[4706]: I1206 06:29:27.183106 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f" containerName="registry-server" Dec 06 06:29:27 crc kubenswrapper[4706]: E1206 06:29:27.183120 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f" containerName="extract-content" Dec 06 06:29:27 crc kubenswrapper[4706]: I1206 06:29:27.183127 4706 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f" containerName="extract-content" Dec 06 06:29:27 crc kubenswrapper[4706]: I1206 06:29:27.183349 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="c8fc2d1e-f7e8-4f91-abfb-b6593d7b8a4f" containerName="registry-server" Dec 06 06:29:27 crc kubenswrapper[4706]: I1206 06:29:27.184787 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4tph7" Dec 06 06:29:27 crc kubenswrapper[4706]: I1206 06:29:27.199235 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4tph7"] Dec 06 06:29:27 crc kubenswrapper[4706]: I1206 06:29:27.280147 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/390cc982-f29d-4ace-b23c-0c77b0bbc87e-utilities\") pod \"community-operators-4tph7\" (UID: \"390cc982-f29d-4ace-b23c-0c77b0bbc87e\") " pod="openshift-marketplace/community-operators-4tph7" Dec 06 06:29:27 crc kubenswrapper[4706]: I1206 06:29:27.280280 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/390cc982-f29d-4ace-b23c-0c77b0bbc87e-catalog-content\") pod \"community-operators-4tph7\" (UID: \"390cc982-f29d-4ace-b23c-0c77b0bbc87e\") " pod="openshift-marketplace/community-operators-4tph7" Dec 06 06:29:27 crc kubenswrapper[4706]: I1206 06:29:27.280414 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjzq4\" (UniqueName: \"kubernetes.io/projected/390cc982-f29d-4ace-b23c-0c77b0bbc87e-kube-api-access-fjzq4\") pod \"community-operators-4tph7\" (UID: \"390cc982-f29d-4ace-b23c-0c77b0bbc87e\") " pod="openshift-marketplace/community-operators-4tph7" Dec 06 06:29:27 crc kubenswrapper[4706]: I1206 06:29:27.381947 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/390cc982-f29d-4ace-b23c-0c77b0bbc87e-utilities\") pod \"community-operators-4tph7\" (UID: \"390cc982-f29d-4ace-b23c-0c77b0bbc87e\") " pod="openshift-marketplace/community-operators-4tph7" Dec 06 06:29:27 crc kubenswrapper[4706]: I1206 06:29:27.382043 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/390cc982-f29d-4ace-b23c-0c77b0bbc87e-catalog-content\") pod \"community-operators-4tph7\" (UID: \"390cc982-f29d-4ace-b23c-0c77b0bbc87e\") " pod="openshift-marketplace/community-operators-4tph7" Dec 06 06:29:27 crc kubenswrapper[4706]: I1206 06:29:27.382139 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjzq4\" (UniqueName: \"kubernetes.io/projected/390cc982-f29d-4ace-b23c-0c77b0bbc87e-kube-api-access-fjzq4\") pod \"community-operators-4tph7\" (UID: \"390cc982-f29d-4ace-b23c-0c77b0bbc87e\") " pod="openshift-marketplace/community-operators-4tph7" Dec 06 06:29:27 crc kubenswrapper[4706]: I1206 06:29:27.382759 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/390cc982-f29d-4ace-b23c-0c77b0bbc87e-utilities\") pod \"community-operators-4tph7\" (UID: \"390cc982-f29d-4ace-b23c-0c77b0bbc87e\") " pod="openshift-marketplace/community-operators-4tph7" Dec 06 06:29:27 crc kubenswrapper[4706]: I1206 06:29:27.382860 4706 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/390cc982-f29d-4ace-b23c-0c77b0bbc87e-catalog-content\") pod \"community-operators-4tph7\" (UID: \"390cc982-f29d-4ace-b23c-0c77b0bbc87e\") " pod="openshift-marketplace/community-operators-4tph7" Dec 06 06:29:27 crc kubenswrapper[4706]: I1206 06:29:27.406515 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjzq4\" (UniqueName: \"kubernetes.io/projected/390cc982-f29d-4ace-b23c-0c77b0bbc87e-kube-api-access-fjzq4\") pod \"community-operators-4tph7\" (UID: \"390cc982-f29d-4ace-b23c-0c77b0bbc87e\") " pod="openshift-marketplace/community-operators-4tph7" Dec 06 06:29:27 crc kubenswrapper[4706]: I1206 06:29:27.510709 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4tph7" Dec 06 06:29:28 crc kubenswrapper[4706]: I1206 06:29:28.026003 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4tph7"] Dec 06 06:29:28 crc kubenswrapper[4706]: I1206 06:29:28.089929 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pbc2v/crc-debug-njnz5" Dec 06 06:29:28 crc kubenswrapper[4706]: I1206 06:29:28.136204 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pbc2v/crc-debug-njnz5"] Dec 06 06:29:28 crc kubenswrapper[4706]: I1206 06:29:28.144719 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pbc2v/crc-debug-njnz5"] Dec 06 06:29:28 crc kubenswrapper[4706]: I1206 06:29:28.204902 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8fd0eb8a-908f-4adb-bbec-f5939cae5f0c-host\") pod \"8fd0eb8a-908f-4adb-bbec-f5939cae5f0c\" (UID: \"8fd0eb8a-908f-4adb-bbec-f5939cae5f0c\") " Dec 06 06:29:28 crc kubenswrapper[4706]: I1206 06:29:28.205063 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fd0eb8a-908f-4adb-bbec-f5939cae5f0c-host" (OuterVolumeSpecName: "host") pod "8fd0eb8a-908f-4adb-bbec-f5939cae5f0c" (UID: "8fd0eb8a-908f-4adb-bbec-f5939cae5f0c"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 06:29:28 crc kubenswrapper[4706]: I1206 06:29:28.205144 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j4rdc\" (UniqueName: \"kubernetes.io/projected/8fd0eb8a-908f-4adb-bbec-f5939cae5f0c-kube-api-access-j4rdc\") pod \"8fd0eb8a-908f-4adb-bbec-f5939cae5f0c\" (UID: \"8fd0eb8a-908f-4adb-bbec-f5939cae5f0c\") " Dec 06 06:29:28 crc kubenswrapper[4706]: I1206 06:29:28.205501 4706 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/8fd0eb8a-908f-4adb-bbec-f5939cae5f0c-host\") on node \"crc\" DevicePath \"\"" Dec 06 06:29:28 crc kubenswrapper[4706]: I1206 06:29:28.212257 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fd0eb8a-908f-4adb-bbec-f5939cae5f0c-kube-api-access-j4rdc" (OuterVolumeSpecName: "kube-api-access-j4rdc") pod "8fd0eb8a-908f-4adb-bbec-f5939cae5f0c" (UID: "8fd0eb8a-908f-4adb-bbec-f5939cae5f0c"). InnerVolumeSpecName "kube-api-access-j4rdc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:29:28 crc kubenswrapper[4706]: I1206 06:29:28.307073 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j4rdc\" (UniqueName: \"kubernetes.io/projected/8fd0eb8a-908f-4adb-bbec-f5939cae5f0c-kube-api-access-j4rdc\") on node \"crc\" DevicePath \"\"" Dec 06 06:29:29 crc kubenswrapper[4706]: I1206 06:29:28.999544 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="33570c8c37928e55cdc6c6a9221e75490d223fa90945ec196d091b1d9b60515f" Dec 06 06:29:29 crc kubenswrapper[4706]: I1206 06:29:28.999577 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pbc2v/crc-debug-njnz5" Dec 06 06:29:29 crc kubenswrapper[4706]: I1206 06:29:29.001572 4706 generic.go:334] "Generic (PLEG): container finished" podID="390cc982-f29d-4ace-b23c-0c77b0bbc87e" containerID="97b3156783a95899d4d9b69bd65321e1b3b3ee31c6036ea7f9521f49ec66553d" exitCode=0 Dec 06 06:29:29 crc kubenswrapper[4706]: I1206 06:29:29.001608 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4tph7" event={"ID":"390cc982-f29d-4ace-b23c-0c77b0bbc87e","Type":"ContainerDied","Data":"97b3156783a95899d4d9b69bd65321e1b3b3ee31c6036ea7f9521f49ec66553d"} Dec 06 06:29:29 crc kubenswrapper[4706]: I1206 06:29:29.001632 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4tph7" event={"ID":"390cc982-f29d-4ace-b23c-0c77b0bbc87e","Type":"ContainerStarted","Data":"33b76e247bded5cbead83ab23fa25ffc8e33d18064f06083599a9df8bf42f7d0"} Dec 06 06:29:29 crc kubenswrapper[4706]: I1206 06:29:29.279031 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pbc2v/crc-debug-c5wpj"] Dec 06 06:29:29 crc kubenswrapper[4706]: E1206 06:29:29.279528 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fd0eb8a-908f-4adb-bbec-f5939cae5f0c" containerName="container-00" Dec 06 06:29:29 crc kubenswrapper[4706]: I1206 06:29:29.279544 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fd0eb8a-908f-4adb-bbec-f5939cae5f0c" containerName="container-00" Dec 06 06:29:29 crc kubenswrapper[4706]: I1206 06:29:29.279800 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fd0eb8a-908f-4adb-bbec-f5939cae5f0c" containerName="container-00" Dec 06 06:29:29 crc kubenswrapper[4706]: I1206 06:29:29.280583 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pbc2v/crc-debug-c5wpj" Dec 06 06:29:29 crc kubenswrapper[4706]: I1206 06:29:29.325312 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwf7c\" (UniqueName: \"kubernetes.io/projected/6c767422-854e-45fd-a5a2-d7d061c54829-kube-api-access-wwf7c\") pod \"crc-debug-c5wpj\" (UID: \"6c767422-854e-45fd-a5a2-d7d061c54829\") " pod="openshift-must-gather-pbc2v/crc-debug-c5wpj" Dec 06 06:29:29 crc kubenswrapper[4706]: I1206 06:29:29.325499 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6c767422-854e-45fd-a5a2-d7d061c54829-host\") pod \"crc-debug-c5wpj\" (UID: \"6c767422-854e-45fd-a5a2-d7d061c54829\") " pod="openshift-must-gather-pbc2v/crc-debug-c5wpj" Dec 06 06:29:29 crc kubenswrapper[4706]: I1206 06:29:29.428065 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwf7c\" (UniqueName: \"kubernetes.io/projected/6c767422-854e-45fd-a5a2-d7d061c54829-kube-api-access-wwf7c\") pod \"crc-debug-c5wpj\" (UID: \"6c767422-854e-45fd-a5a2-d7d061c54829\") " pod="openshift-must-gather-pbc2v/crc-debug-c5wpj" Dec 06 06:29:29 crc kubenswrapper[4706]: I1206 06:29:29.428856 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6c767422-854e-45fd-a5a2-d7d061c54829-host\") pod \"crc-debug-c5wpj\" (UID: \"6c767422-854e-45fd-a5a2-d7d061c54829\") " pod="openshift-must-gather-pbc2v/crc-debug-c5wpj" Dec 06 06:29:29 crc kubenswrapper[4706]: I1206 06:29:29.428978 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6c767422-854e-45fd-a5a2-d7d061c54829-host\") pod \"crc-debug-c5wpj\" (UID: \"6c767422-854e-45fd-a5a2-d7d061c54829\") " pod="openshift-must-gather-pbc2v/crc-debug-c5wpj" Dec 06 06:29:29 crc kubenswrapper[4706]: I1206 06:29:29.449153 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwf7c\" (UniqueName: \"kubernetes.io/projected/6c767422-854e-45fd-a5a2-d7d061c54829-kube-api-access-wwf7c\") pod \"crc-debug-c5wpj\" (UID: \"6c767422-854e-45fd-a5a2-d7d061c54829\") " pod="openshift-must-gather-pbc2v/crc-debug-c5wpj" Dec 06 06:29:29 crc kubenswrapper[4706]: I1206 06:29:29.609007 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pbc2v/crc-debug-c5wpj" Dec 06 06:29:30 crc kubenswrapper[4706]: I1206 06:29:30.012639 4706 generic.go:334] "Generic (PLEG): container finished" podID="6c767422-854e-45fd-a5a2-d7d061c54829" containerID="25fef7b3f98a9f2c026eb8f49062ef8c9e962cd50c38b3e0e8c2bbb3234efe70" exitCode=0 Dec 06 06:29:30 crc kubenswrapper[4706]: I1206 06:29:30.012872 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pbc2v/crc-debug-c5wpj" event={"ID":"6c767422-854e-45fd-a5a2-d7d061c54829","Type":"ContainerDied","Data":"25fef7b3f98a9f2c026eb8f49062ef8c9e962cd50c38b3e0e8c2bbb3234efe70"} Dec 06 06:29:30 crc kubenswrapper[4706]: I1206 06:29:30.013152 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pbc2v/crc-debug-c5wpj" event={"ID":"6c767422-854e-45fd-a5a2-d7d061c54829","Type":"ContainerStarted","Data":"850e514f31332e8feef5506274c3ce797c368e6ba47222ef5ea77fc6bac0884f"} Dec 06 06:29:30 crc kubenswrapper[4706]: I1206 06:29:30.057325 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8fd0eb8a-908f-4adb-bbec-f5939cae5f0c" path="/var/lib/kubelet/pods/8fd0eb8a-908f-4adb-bbec-f5939cae5f0c/volumes" Dec 06 06:29:30 crc kubenswrapper[4706]: I1206 06:29:30.509276 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pbc2v/crc-debug-c5wpj"] Dec 06 06:29:30 crc kubenswrapper[4706]: I1206 06:29:30.517967 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pbc2v/crc-debug-c5wpj"] Dec 06 06:29:31 crc kubenswrapper[4706]: I1206 06:29:31.027076 4706 generic.go:334] "Generic (PLEG): container finished" podID="390cc982-f29d-4ace-b23c-0c77b0bbc87e" containerID="1cf37d877083720c843486640c780e469e0077c8de20d58d5fe4080f143bfedd" exitCode=0 Dec 06 06:29:31 crc kubenswrapper[4706]: I1206 06:29:31.027149 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4tph7" event={"ID":"390cc982-f29d-4ace-b23c-0c77b0bbc87e","Type":"ContainerDied","Data":"1cf37d877083720c843486640c780e469e0077c8de20d58d5fe4080f143bfedd"} Dec 06 06:29:31 crc kubenswrapper[4706]: I1206 06:29:31.134881 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pbc2v/crc-debug-c5wpj" Dec 06 06:29:31 crc kubenswrapper[4706]: I1206 06:29:31.267777 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6c767422-854e-45fd-a5a2-d7d061c54829-host\") pod \"6c767422-854e-45fd-a5a2-d7d061c54829\" (UID: \"6c767422-854e-45fd-a5a2-d7d061c54829\") " Dec 06 06:29:31 crc kubenswrapper[4706]: I1206 06:29:31.267838 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6c767422-854e-45fd-a5a2-d7d061c54829-host" (OuterVolumeSpecName: "host") pod "6c767422-854e-45fd-a5a2-d7d061c54829" (UID: "6c767422-854e-45fd-a5a2-d7d061c54829"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 06:29:31 crc kubenswrapper[4706]: I1206 06:29:31.267946 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwf7c\" (UniqueName: \"kubernetes.io/projected/6c767422-854e-45fd-a5a2-d7d061c54829-kube-api-access-wwf7c\") pod \"6c767422-854e-45fd-a5a2-d7d061c54829\" (UID: \"6c767422-854e-45fd-a5a2-d7d061c54829\") " Dec 06 06:29:31 crc kubenswrapper[4706]: I1206 06:29:31.268489 4706 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6c767422-854e-45fd-a5a2-d7d061c54829-host\") on node \"crc\" DevicePath \"\"" Dec 06 06:29:31 crc kubenswrapper[4706]: I1206 06:29:31.273139 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c767422-854e-45fd-a5a2-d7d061c54829-kube-api-access-wwf7c" (OuterVolumeSpecName: "kube-api-access-wwf7c") pod "6c767422-854e-45fd-a5a2-d7d061c54829" (UID: "6c767422-854e-45fd-a5a2-d7d061c54829"). InnerVolumeSpecName "kube-api-access-wwf7c". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:29:31 crc kubenswrapper[4706]: I1206 06:29:31.370331 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwf7c\" (UniqueName: \"kubernetes.io/projected/6c767422-854e-45fd-a5a2-d7d061c54829-kube-api-access-wwf7c\") on node \"crc\" DevicePath \"\"" Dec 06 06:29:31 crc kubenswrapper[4706]: I1206 06:29:31.693821 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-pbc2v/crc-debug-m9qtr"] Dec 06 06:29:31 crc kubenswrapper[4706]: E1206 06:29:31.694981 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c767422-854e-45fd-a5a2-d7d061c54829" containerName="container-00" Dec 06 06:29:31 crc kubenswrapper[4706]: I1206 06:29:31.695061 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c767422-854e-45fd-a5a2-d7d061c54829" containerName="container-00" Dec 06 06:29:31 crc kubenswrapper[4706]: I1206 06:29:31.695398 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c767422-854e-45fd-a5a2-d7d061c54829" containerName="container-00" Dec 06 06:29:31 crc kubenswrapper[4706]: I1206 06:29:31.696153 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pbc2v/crc-debug-m9qtr" Dec 06 06:29:31 crc kubenswrapper[4706]: I1206 06:29:31.877460 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d5fc6d73-f4ef-4f6f-8e6b-545112082808-host\") pod \"crc-debug-m9qtr\" (UID: \"d5fc6d73-f4ef-4f6f-8e6b-545112082808\") " pod="openshift-must-gather-pbc2v/crc-debug-m9qtr" Dec 06 06:29:31 crc kubenswrapper[4706]: I1206 06:29:31.877890 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svfsc\" (UniqueName: \"kubernetes.io/projected/d5fc6d73-f4ef-4f6f-8e6b-545112082808-kube-api-access-svfsc\") pod \"crc-debug-m9qtr\" (UID: \"d5fc6d73-f4ef-4f6f-8e6b-545112082808\") " pod="openshift-must-gather-pbc2v/crc-debug-m9qtr" Dec 06 06:29:31 crc kubenswrapper[4706]: I1206 06:29:31.980325 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svfsc\" (UniqueName: \"kubernetes.io/projected/d5fc6d73-f4ef-4f6f-8e6b-545112082808-kube-api-access-svfsc\") pod \"crc-debug-m9qtr\" (UID: \"d5fc6d73-f4ef-4f6f-8e6b-545112082808\") " pod="openshift-must-gather-pbc2v/crc-debug-m9qtr" Dec 06 06:29:31 crc kubenswrapper[4706]: I1206 06:29:31.980453 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d5fc6d73-f4ef-4f6f-8e6b-545112082808-host\") pod \"crc-debug-m9qtr\" (UID: \"d5fc6d73-f4ef-4f6f-8e6b-545112082808\") " pod="openshift-must-gather-pbc2v/crc-debug-m9qtr" Dec 06 06:29:31 crc kubenswrapper[4706]: I1206 06:29:31.980733 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d5fc6d73-f4ef-4f6f-8e6b-545112082808-host\") pod \"crc-debug-m9qtr\" (UID: \"d5fc6d73-f4ef-4f6f-8e6b-545112082808\") " pod="openshift-must-gather-pbc2v/crc-debug-m9qtr" Dec 06 06:29:32 crc kubenswrapper[4706]: I1206 06:29:32.003380 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svfsc\" (UniqueName: \"kubernetes.io/projected/d5fc6d73-f4ef-4f6f-8e6b-545112082808-kube-api-access-svfsc\") pod \"crc-debug-m9qtr\" (UID: \"d5fc6d73-f4ef-4f6f-8e6b-545112082808\") " pod="openshift-must-gather-pbc2v/crc-debug-m9qtr" Dec 06 06:29:32 crc kubenswrapper[4706]: I1206 06:29:32.014395 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pbc2v/crc-debug-m9qtr" Dec 06 06:29:32 crc kubenswrapper[4706]: I1206 06:29:32.043929 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pbc2v/crc-debug-c5wpj" Dec 06 06:29:32 crc kubenswrapper[4706]: I1206 06:29:32.057008 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c767422-854e-45fd-a5a2-d7d061c54829" path="/var/lib/kubelet/pods/6c767422-854e-45fd-a5a2-d7d061c54829/volumes" Dec 06 06:29:32 crc kubenswrapper[4706]: I1206 06:29:32.057761 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4tph7" event={"ID":"390cc982-f29d-4ace-b23c-0c77b0bbc87e","Type":"ContainerStarted","Data":"ead383b1f939531e7e74ad0ae4004f804dc1a9088cdc1f47cb7f94e43d76764f"} Dec 06 06:29:32 crc kubenswrapper[4706]: I1206 06:29:32.057806 4706 scope.go:117] "RemoveContainer" containerID="25fef7b3f98a9f2c026eb8f49062ef8c9e962cd50c38b3e0e8c2bbb3234efe70" Dec 06 06:29:32 crc kubenswrapper[4706]: I1206 06:29:32.082013 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4tph7" podStartSLOduration=2.58314785 podStartE2EDuration="5.081993829s" podCreationTimestamp="2025-12-06 06:29:27 +0000 UTC" firstStartedPulling="2025-12-06 06:29:29.003480533 +0000 UTC m=+4191.331304477" lastFinishedPulling="2025-12-06 06:29:31.502326452 +0000 UTC m=+4193.830150456" observedRunningTime="2025-12-06 06:29:32.078521105 +0000 UTC m=+4194.406345059" watchObservedRunningTime="2025-12-06 06:29:32.081993829 +0000 UTC m=+4194.409817783" Dec 06 06:29:33 crc kubenswrapper[4706]: I1206 06:29:33.058201 4706 generic.go:334] "Generic (PLEG): container finished" podID="d5fc6d73-f4ef-4f6f-8e6b-545112082808" containerID="fa81850a001348d1d50ffbd86f12c84b67307d978a62313a6a43a35957048528" exitCode=0 Dec 06 06:29:33 crc kubenswrapper[4706]: I1206 06:29:33.058297 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pbc2v/crc-debug-m9qtr" event={"ID":"d5fc6d73-f4ef-4f6f-8e6b-545112082808","Type":"ContainerDied","Data":"fa81850a001348d1d50ffbd86f12c84b67307d978a62313a6a43a35957048528"} Dec 06 06:29:33 crc kubenswrapper[4706]: I1206 06:29:33.058598 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pbc2v/crc-debug-m9qtr" event={"ID":"d5fc6d73-f4ef-4f6f-8e6b-545112082808","Type":"ContainerStarted","Data":"ae4b224677109dc2bb609ff57c5b0c612a03c5e01652195ebb5107a4d81bdedb"} Dec 06 06:29:33 crc kubenswrapper[4706]: I1206 06:29:33.101869 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pbc2v/crc-debug-m9qtr"] Dec 06 06:29:33 crc kubenswrapper[4706]: I1206 06:29:33.113456 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pbc2v/crc-debug-m9qtr"] Dec 06 06:29:34 crc kubenswrapper[4706]: I1206 06:29:34.191304 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pbc2v/crc-debug-m9qtr" Dec 06 06:29:34 crc kubenswrapper[4706]: I1206 06:29:34.221802 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-svfsc\" (UniqueName: \"kubernetes.io/projected/d5fc6d73-f4ef-4f6f-8e6b-545112082808-kube-api-access-svfsc\") pod \"d5fc6d73-f4ef-4f6f-8e6b-545112082808\" (UID: \"d5fc6d73-f4ef-4f6f-8e6b-545112082808\") " Dec 06 06:29:34 crc kubenswrapper[4706]: I1206 06:29:34.222117 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d5fc6d73-f4ef-4f6f-8e6b-545112082808-host\") pod \"d5fc6d73-f4ef-4f6f-8e6b-545112082808\" (UID: \"d5fc6d73-f4ef-4f6f-8e6b-545112082808\") " Dec 06 06:29:34 crc kubenswrapper[4706]: I1206 06:29:34.222158 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d5fc6d73-f4ef-4f6f-8e6b-545112082808-host" (OuterVolumeSpecName: "host") pod "d5fc6d73-f4ef-4f6f-8e6b-545112082808" (UID: "d5fc6d73-f4ef-4f6f-8e6b-545112082808"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 06:29:34 crc kubenswrapper[4706]: I1206 06:29:34.222616 4706 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d5fc6d73-f4ef-4f6f-8e6b-545112082808-host\") on node \"crc\" DevicePath \"\"" Dec 06 06:29:34 crc kubenswrapper[4706]: I1206 06:29:34.227148 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5fc6d73-f4ef-4f6f-8e6b-545112082808-kube-api-access-svfsc" (OuterVolumeSpecName: "kube-api-access-svfsc") pod "d5fc6d73-f4ef-4f6f-8e6b-545112082808" (UID: "d5fc6d73-f4ef-4f6f-8e6b-545112082808"). InnerVolumeSpecName "kube-api-access-svfsc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:29:34 crc kubenswrapper[4706]: I1206 06:29:34.324273 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-svfsc\" (UniqueName: \"kubernetes.io/projected/d5fc6d73-f4ef-4f6f-8e6b-545112082808-kube-api-access-svfsc\") on node \"crc\" DevicePath \"\"" Dec 06 06:29:35 crc kubenswrapper[4706]: I1206 06:29:35.076983 4706 scope.go:117] "RemoveContainer" containerID="fa81850a001348d1d50ffbd86f12c84b67307d978a62313a6a43a35957048528" Dec 06 06:29:35 crc kubenswrapper[4706]: I1206 06:29:35.077405 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pbc2v/crc-debug-m9qtr" Dec 06 06:29:36 crc kubenswrapper[4706]: I1206 06:29:36.047399 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d5fc6d73-f4ef-4f6f-8e6b-545112082808" path="/var/lib/kubelet/pods/d5fc6d73-f4ef-4f6f-8e6b-545112082808/volumes" Dec 06 06:29:37 crc kubenswrapper[4706]: I1206 06:29:37.511514 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4tph7" Dec 06 06:29:37 crc kubenswrapper[4706]: I1206 06:29:37.511791 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4tph7" Dec 06 06:29:37 crc kubenswrapper[4706]: I1206 06:29:37.569261 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4tph7" Dec 06 06:29:38 crc kubenswrapper[4706]: I1206 06:29:38.157087 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4tph7" Dec 06 06:29:38 crc kubenswrapper[4706]: I1206 06:29:38.202483 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4tph7"] Dec 06 06:29:40 crc kubenswrapper[4706]: I1206 06:29:40.120876 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4tph7" podUID="390cc982-f29d-4ace-b23c-0c77b0bbc87e" containerName="registry-server" containerID="cri-o://ead383b1f939531e7e74ad0ae4004f804dc1a9088cdc1f47cb7f94e43d76764f" gracePeriod=2 Dec 06 06:29:40 crc kubenswrapper[4706]: I1206 06:29:40.611883 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4tph7" Dec 06 06:29:40 crc kubenswrapper[4706]: I1206 06:29:40.732772 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjzq4\" (UniqueName: \"kubernetes.io/projected/390cc982-f29d-4ace-b23c-0c77b0bbc87e-kube-api-access-fjzq4\") pod \"390cc982-f29d-4ace-b23c-0c77b0bbc87e\" (UID: \"390cc982-f29d-4ace-b23c-0c77b0bbc87e\") " Dec 06 06:29:40 crc kubenswrapper[4706]: I1206 06:29:40.733003 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/390cc982-f29d-4ace-b23c-0c77b0bbc87e-catalog-content\") pod \"390cc982-f29d-4ace-b23c-0c77b0bbc87e\" (UID: \"390cc982-f29d-4ace-b23c-0c77b0bbc87e\") " Dec 06 06:29:40 crc kubenswrapper[4706]: I1206 06:29:40.733121 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/390cc982-f29d-4ace-b23c-0c77b0bbc87e-utilities\") pod \"390cc982-f29d-4ace-b23c-0c77b0bbc87e\" (UID: \"390cc982-f29d-4ace-b23c-0c77b0bbc87e\") " Dec 06 06:29:40 crc kubenswrapper[4706]: I1206 06:29:40.733817 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/390cc982-f29d-4ace-b23c-0c77b0bbc87e-utilities" (OuterVolumeSpecName: "utilities") pod "390cc982-f29d-4ace-b23c-0c77b0bbc87e" (UID: "390cc982-f29d-4ace-b23c-0c77b0bbc87e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:29:40 crc kubenswrapper[4706]: I1206 06:29:40.739378 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/390cc982-f29d-4ace-b23c-0c77b0bbc87e-kube-api-access-fjzq4" (OuterVolumeSpecName: "kube-api-access-fjzq4") pod "390cc982-f29d-4ace-b23c-0c77b0bbc87e" (UID: "390cc982-f29d-4ace-b23c-0c77b0bbc87e"). InnerVolumeSpecName "kube-api-access-fjzq4". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:29:40 crc kubenswrapper[4706]: I1206 06:29:40.792238 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/390cc982-f29d-4ace-b23c-0c77b0bbc87e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "390cc982-f29d-4ace-b23c-0c77b0bbc87e" (UID: "390cc982-f29d-4ace-b23c-0c77b0bbc87e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:29:40 crc kubenswrapper[4706]: I1206 06:29:40.835433 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/390cc982-f29d-4ace-b23c-0c77b0bbc87e-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 06:29:40 crc kubenswrapper[4706]: I1206 06:29:40.835471 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjzq4\" (UniqueName: \"kubernetes.io/projected/390cc982-f29d-4ace-b23c-0c77b0bbc87e-kube-api-access-fjzq4\") on node \"crc\" DevicePath \"\"" Dec 06 06:29:40 crc kubenswrapper[4706]: I1206 06:29:40.835482 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/390cc982-f29d-4ace-b23c-0c77b0bbc87e-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 06:29:41 crc kubenswrapper[4706]: I1206 06:29:41.134381 4706 generic.go:334] "Generic (PLEG): container finished" podID="390cc982-f29d-4ace-b23c-0c77b0bbc87e" containerID="ead383b1f939531e7e74ad0ae4004f804dc1a9088cdc1f47cb7f94e43d76764f" exitCode=0 Dec 06 06:29:41 crc kubenswrapper[4706]: I1206 06:29:41.134427 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4tph7" event={"ID":"390cc982-f29d-4ace-b23c-0c77b0bbc87e","Type":"ContainerDied","Data":"ead383b1f939531e7e74ad0ae4004f804dc1a9088cdc1f47cb7f94e43d76764f"} Dec 06 06:29:41 crc kubenswrapper[4706]: I1206 06:29:41.134455 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4tph7" event={"ID":"390cc982-f29d-4ace-b23c-0c77b0bbc87e","Type":"ContainerDied","Data":"33b76e247bded5cbead83ab23fa25ffc8e33d18064f06083599a9df8bf42f7d0"} Dec 06 06:29:41 crc kubenswrapper[4706]: I1206 06:29:41.134471 4706 scope.go:117] "RemoveContainer" containerID="ead383b1f939531e7e74ad0ae4004f804dc1a9088cdc1f47cb7f94e43d76764f" Dec 06 06:29:41 crc kubenswrapper[4706]: I1206 06:29:41.134602 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4tph7" Dec 06 06:29:41 crc kubenswrapper[4706]: I1206 06:29:41.164800 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4tph7"] Dec 06 06:29:41 crc kubenswrapper[4706]: I1206 06:29:41.167004 4706 scope.go:117] "RemoveContainer" containerID="1cf37d877083720c843486640c780e469e0077c8de20d58d5fe4080f143bfedd" Dec 06 06:29:41 crc kubenswrapper[4706]: I1206 06:29:41.175903 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4tph7"] Dec 06 06:29:41 crc kubenswrapper[4706]: I1206 06:29:41.206181 4706 scope.go:117] "RemoveContainer" containerID="97b3156783a95899d4d9b69bd65321e1b3b3ee31c6036ea7f9521f49ec66553d" Dec 06 06:29:41 crc kubenswrapper[4706]: I1206 06:29:41.223765 4706 scope.go:117] "RemoveContainer" containerID="ead383b1f939531e7e74ad0ae4004f804dc1a9088cdc1f47cb7f94e43d76764f" Dec 06 06:29:41 crc kubenswrapper[4706]: E1206 06:29:41.224356 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ead383b1f939531e7e74ad0ae4004f804dc1a9088cdc1f47cb7f94e43d76764f\": container with ID starting with ead383b1f939531e7e74ad0ae4004f804dc1a9088cdc1f47cb7f94e43d76764f not found: ID does not exist" containerID="ead383b1f939531e7e74ad0ae4004f804dc1a9088cdc1f47cb7f94e43d76764f" Dec 06 06:29:41 crc kubenswrapper[4706]: I1206 06:29:41.224406 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ead383b1f939531e7e74ad0ae4004f804dc1a9088cdc1f47cb7f94e43d76764f"} err="failed to get container status \"ead383b1f939531e7e74ad0ae4004f804dc1a9088cdc1f47cb7f94e43d76764f\": rpc error: code = NotFound desc = could not find container \"ead383b1f939531e7e74ad0ae4004f804dc1a9088cdc1f47cb7f94e43d76764f\": container with ID starting with ead383b1f939531e7e74ad0ae4004f804dc1a9088cdc1f47cb7f94e43d76764f not found: ID does not exist" Dec 06 06:29:41 crc kubenswrapper[4706]: I1206 06:29:41.224453 4706 scope.go:117] "RemoveContainer" containerID="1cf37d877083720c843486640c780e469e0077c8de20d58d5fe4080f143bfedd" Dec 06 06:29:41 crc kubenswrapper[4706]: E1206 06:29:41.225074 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1cf37d877083720c843486640c780e469e0077c8de20d58d5fe4080f143bfedd\": container with ID starting with 1cf37d877083720c843486640c780e469e0077c8de20d58d5fe4080f143bfedd not found: ID does not exist" containerID="1cf37d877083720c843486640c780e469e0077c8de20d58d5fe4080f143bfedd" Dec 06 06:29:41 crc kubenswrapper[4706]: I1206 06:29:41.225110 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1cf37d877083720c843486640c780e469e0077c8de20d58d5fe4080f143bfedd"} err="failed to get container status \"1cf37d877083720c843486640c780e469e0077c8de20d58d5fe4080f143bfedd\": rpc error: code = NotFound desc = could not find container \"1cf37d877083720c843486640c780e469e0077c8de20d58d5fe4080f143bfedd\": container with ID starting with 1cf37d877083720c843486640c780e469e0077c8de20d58d5fe4080f143bfedd not found: ID does not exist" Dec 06 06:29:41 crc kubenswrapper[4706]: I1206 06:29:41.225132 4706 scope.go:117] "RemoveContainer" containerID="97b3156783a95899d4d9b69bd65321e1b3b3ee31c6036ea7f9521f49ec66553d" Dec 06 06:29:41 crc kubenswrapper[4706]: E1206 06:29:41.225383 4706 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"97b3156783a95899d4d9b69bd65321e1b3b3ee31c6036ea7f9521f49ec66553d\": container with ID starting with 97b3156783a95899d4d9b69bd65321e1b3b3ee31c6036ea7f9521f49ec66553d not found: ID does not exist" containerID="97b3156783a95899d4d9b69bd65321e1b3b3ee31c6036ea7f9521f49ec66553d" Dec 06 06:29:41 crc kubenswrapper[4706]: I1206 06:29:41.225413 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97b3156783a95899d4d9b69bd65321e1b3b3ee31c6036ea7f9521f49ec66553d"} err="failed to get container status \"97b3156783a95899d4d9b69bd65321e1b3b3ee31c6036ea7f9521f49ec66553d\": rpc error: code = NotFound desc = could not find container \"97b3156783a95899d4d9b69bd65321e1b3b3ee31c6036ea7f9521f49ec66553d\": container with ID starting with 97b3156783a95899d4d9b69bd65321e1b3b3ee31c6036ea7f9521f49ec66553d not found: ID does not exist" Dec 06 06:29:42 crc kubenswrapper[4706]: I1206 06:29:42.046322 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="390cc982-f29d-4ace-b23c-0c77b0bbc87e" path="/var/lib/kubelet/pods/390cc982-f29d-4ace-b23c-0c77b0bbc87e/volumes" Dec 06 06:29:48 crc kubenswrapper[4706]: I1206 06:29:48.815972 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5d57ffb9bb-t86s7_fb031ade-7dae-40f8-a748-8842d00f6a37/barbican-api/0.log" Dec 06 06:29:49 crc kubenswrapper[4706]: I1206 06:29:49.050747 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5d57ffb9bb-t86s7_fb031ade-7dae-40f8-a748-8842d00f6a37/barbican-api-log/0.log" Dec 06 06:29:49 crc kubenswrapper[4706]: I1206 06:29:49.104576 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5985b9fc68-gt5hx_99fd71cd-f273-4e5f-91e1-2816f523b9ce/barbican-keystone-listener/0.log" Dec 06 06:29:49 crc kubenswrapper[4706]: I1206 06:29:49.169336 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5985b9fc68-gt5hx_99fd71cd-f273-4e5f-91e1-2816f523b9ce/barbican-keystone-listener-log/0.log" Dec 06 06:29:49 crc kubenswrapper[4706]: I1206 06:29:49.284625 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-99698bc47-f5twk_7ea78fc3-49cb-46cb-a450-c3c0990135fb/barbican-worker/0.log" Dec 06 06:29:49 crc kubenswrapper[4706]: I1206 06:29:49.330394 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-99698bc47-f5twk_7ea78fc3-49cb-46cb-a450-c3c0990135fb/barbican-worker-log/0.log" Dec 06 06:29:49 crc kubenswrapper[4706]: I1206 06:29:49.521444 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv_ab55260b-0613-4be9-b0e2-e1470cdb018d/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Dec 06 06:29:49 crc kubenswrapper[4706]: I1206 06:29:49.595605 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_b3b0627f-70db-4eb0-8d16-c93648772685/ceilometer-central-agent/0.log" Dec 06 06:29:49 crc kubenswrapper[4706]: I1206 06:29:49.671527 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_b3b0627f-70db-4eb0-8d16-c93648772685/ceilometer-notification-agent/0.log" Dec 06 06:29:49 crc kubenswrapper[4706]: I1206 06:29:49.753843 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_b3b0627f-70db-4eb0-8d16-c93648772685/proxy-httpd/0.log" Dec 06 
06:29:49 crc kubenswrapper[4706]: I1206 06:29:49.773757 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_b3b0627f-70db-4eb0-8d16-c93648772685/sg-core/0.log" Dec 06 06:29:49 crc kubenswrapper[4706]: I1206 06:29:49.974662 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_3631398b-6bec-44d1-bf3b-19f8e8114c5c/cinder-api/0.log" Dec 06 06:29:49 crc kubenswrapper[4706]: I1206 06:29:49.976980 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_3631398b-6bec-44d1-bf3b-19f8e8114c5c/cinder-api-log/0.log" Dec 06 06:29:50 crc kubenswrapper[4706]: I1206 06:29:50.057632 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_9823a9c2-7e13-4c23-a9ea-af6e03c32773/cinder-scheduler/0.log" Dec 06 06:29:50 crc kubenswrapper[4706]: I1206 06:29:50.343403 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_9823a9c2-7e13-4c23-a9ea-af6e03c32773/probe/0.log" Dec 06 06:29:50 crc kubenswrapper[4706]: I1206 06:29:50.414722 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2_3dc977db-985f-4d5a-8735-0c417c7be72c/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Dec 06 06:29:50 crc kubenswrapper[4706]: I1206 06:29:50.582915 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9_a71e1253-a40e-4b2b-b911-c15a88da2be5/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 06 06:29:50 crc kubenswrapper[4706]: I1206 06:29:50.637687 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-cb6ffcf87-5dz8j_e7068fc5-ddf3-4a32-bf1a-803684a95dd3/init/0.log" Dec 06 06:29:50 crc kubenswrapper[4706]: I1206 06:29:50.826180 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-cb6ffcf87-5dz8j_e7068fc5-ddf3-4a32-bf1a-803684a95dd3/init/0.log" Dec 06 06:29:50 crc kubenswrapper[4706]: I1206 06:29:50.862345 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x_582f8518-3c87-496d-b057-b2f66658a731/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Dec 06 06:29:50 crc kubenswrapper[4706]: I1206 06:29:50.867789 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-cb6ffcf87-5dz8j_e7068fc5-ddf3-4a32-bf1a-803684a95dd3/dnsmasq-dns/0.log" Dec 06 06:29:51 crc kubenswrapper[4706]: I1206 06:29:51.054375 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_c50f78da-9727-4908-ba76-4a3dbc4455c7/glance-httpd/0.log" Dec 06 06:29:51 crc kubenswrapper[4706]: I1206 06:29:51.099095 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_c50f78da-9727-4908-ba76-4a3dbc4455c7/glance-log/0.log" Dec 06 06:29:51 crc kubenswrapper[4706]: I1206 06:29:51.269952 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_fdfe9ea0-e897-4071-9b1c-dcdd908b549d/glance-httpd/0.log" Dec 06 06:29:51 crc kubenswrapper[4706]: I1206 06:29:51.291912 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_fdfe9ea0-e897-4071-9b1c-dcdd908b549d/glance-log/0.log" Dec 06 06:29:51 crc kubenswrapper[4706]: I1206 06:29:51.594560 4706 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_horizon-8f474c4b8-xgvj4_8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f/horizon/0.log" Dec 06 06:29:51 crc kubenswrapper[4706]: I1206 06:29:51.640851 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg_cc1a17c8-f209-4fb0-9fd5-d17086f90eba/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Dec 06 06:29:51 crc kubenswrapper[4706]: I1206 06:29:51.835504 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-8f474c4b8-xgvj4_8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f/horizon-log/0.log" Dec 06 06:29:51 crc kubenswrapper[4706]: I1206 06:29:51.874927 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-cpbpm_82ebe200-9dff-4f3b-8bf1-e1a6feee951c/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 06 06:29:52 crc kubenswrapper[4706]: I1206 06:29:52.135760 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29416681-gqk5x_32439274-bc88-4aa9-b040-98212cda2b38/keystone-cron/0.log" Dec 06 06:29:52 crc kubenswrapper[4706]: I1206 06:29:52.218348 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-6c6f7f7c88-ptmf7_0cbad2bc-87d3-4f51-aed8-36d386af56eb/keystone-api/0.log" Dec 06 06:29:52 crc kubenswrapper[4706]: I1206 06:29:52.363404 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_66632781-9905-4f3f-8945-92ca177cf2bc/kube-state-metrics/0.log" Dec 06 06:29:52 crc kubenswrapper[4706]: I1206 06:29:52.433467 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb_5620e36a-01d5-4282-ad0c-a3e96dc38329/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Dec 06 06:29:52 crc kubenswrapper[4706]: I1206 06:29:52.787337 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5c45f4d87f-7sd44_d8a2aaf5-7417-43c4-9562-2df330329adf/neutron-api/0.log" Dec 06 06:29:52 crc kubenswrapper[4706]: I1206 06:29:52.848815 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5c45f4d87f-7sd44_d8a2aaf5-7417-43c4-9562-2df330329adf/neutron-httpd/0.log" Dec 06 06:29:52 crc kubenswrapper[4706]: I1206 06:29:52.870679 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx_d67f85a9-c64e-42f0-b686-bfb179dccc76/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Dec 06 06:29:53 crc kubenswrapper[4706]: I1206 06:29:53.537395 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_635ff13a-9863-4ae2-84df-78df1c359b9e/nova-cell0-conductor-conductor/0.log" Dec 06 06:29:53 crc kubenswrapper[4706]: I1206 06:29:53.550571 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_085d0127-557c-49a2-80f4-2a86fed685cc/nova-api-log/0.log" Dec 06 06:29:53 crc kubenswrapper[4706]: I1206 06:29:53.724325 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_085d0127-557c-49a2-80f4-2a86fed685cc/nova-api-api/0.log" Dec 06 06:29:53 crc kubenswrapper[4706]: I1206 06:29:53.891996 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_30a8debc-3590-46cb-9042-5cf8fe5a87d6/nova-cell1-conductor-conductor/0.log" Dec 06 06:29:53 crc kubenswrapper[4706]: I1206 06:29:53.900093 4706 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_nova-cell1-novncproxy-0_b4142f86-6823-4e49-9a0e-564cdf8d043b/nova-cell1-novncproxy-novncproxy/0.log" Dec 06 06:29:54 crc kubenswrapper[4706]: I1206 06:29:54.101342 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-kfg84_c4a06494-e4f9-427e-b7e2-dad0c843d44a/nova-edpm-deployment-openstack-edpm-ipam/0.log" Dec 06 06:29:54 crc kubenswrapper[4706]: I1206 06:29:54.271577 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_784eb2e8-d56e-4523-86cf-b67f953db54d/nova-metadata-log/0.log" Dec 06 06:29:54 crc kubenswrapper[4706]: I1206 06:29:54.581136 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_08955916-6689-445e-830d-6fbfe9a2f460/mysql-bootstrap/0.log" Dec 06 06:29:54 crc kubenswrapper[4706]: I1206 06:29:54.728548 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_24bb9983-5fec-49b8-9cff-cb2c111af5b9/nova-scheduler-scheduler/0.log" Dec 06 06:29:54 crc kubenswrapper[4706]: I1206 06:29:54.782587 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_08955916-6689-445e-830d-6fbfe9a2f460/mysql-bootstrap/0.log" Dec 06 06:29:54 crc kubenswrapper[4706]: I1206 06:29:54.829831 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_08955916-6689-445e-830d-6fbfe9a2f460/galera/0.log" Dec 06 06:29:55 crc kubenswrapper[4706]: I1206 06:29:55.483892 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_784eb2e8-d56e-4523-86cf-b67f953db54d/nova-metadata-metadata/0.log" Dec 06 06:29:55 crc kubenswrapper[4706]: I1206 06:29:55.774286 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_74e1bb57-a746-472b-a3b1-ffb875c658e4/mysql-bootstrap/0.log" Dec 06 06:29:55 crc kubenswrapper[4706]: I1206 06:29:55.884384 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_74e1bb57-a746-472b-a3b1-ffb875c658e4/mysql-bootstrap/0.log" Dec 06 06:29:55 crc kubenswrapper[4706]: I1206 06:29:55.925989 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_74e1bb57-a746-472b-a3b1-ffb875c658e4/galera/0.log" Dec 06 06:29:56 crc kubenswrapper[4706]: I1206 06:29:56.101386 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_2d5b5a38-b853-47de-ada1-1d7c240e84e4/openstackclient/0.log" Dec 06 06:29:56 crc kubenswrapper[4706]: I1206 06:29:56.164023 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-cbrg2_cde7e1a3-dd72-47aa-a0b5-117bc2c53885/ovn-controller/0.log" Dec 06 06:29:56 crc kubenswrapper[4706]: I1206 06:29:56.422909 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-5wn4n_bc140eba-adb0-407f-8472-1270d4fc5263/openstack-network-exporter/0.log" Dec 06 06:29:56 crc kubenswrapper[4706]: I1206 06:29:56.552529 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-cj4kx_cbdbd121-5030-4488-9425-7548fb291906/ovsdb-server-init/0.log" Dec 06 06:29:56 crc kubenswrapper[4706]: I1206 06:29:56.630411 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-cj4kx_cbdbd121-5030-4488-9425-7548fb291906/ovsdb-server-init/0.log" Dec 06 06:29:56 crc kubenswrapper[4706]: I1206 06:29:56.665182 4706 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-cj4kx_cbdbd121-5030-4488-9425-7548fb291906/ovs-vswitchd/0.log" Dec 06 06:29:56 crc kubenswrapper[4706]: I1206 06:29:56.710645 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-cj4kx_cbdbd121-5030-4488-9425-7548fb291906/ovsdb-server/0.log" Dec 06 06:29:56 crc kubenswrapper[4706]: I1206 06:29:56.910833 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-wb66l_24cc16ad-5e43-4d54-bdf8-69d4f319907c/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Dec 06 06:29:57 crc kubenswrapper[4706]: I1206 06:29:57.488191 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_0d7d6b1e-41f4-4140-a752-bcf110cf3bd5/openstack-network-exporter/0.log" Dec 06 06:29:57 crc kubenswrapper[4706]: I1206 06:29:57.609325 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_0d7d6b1e-41f4-4140-a752-bcf110cf3bd5/ovn-northd/0.log" Dec 06 06:29:57 crc kubenswrapper[4706]: I1206 06:29:57.679978 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_c330d787-77c8-4014-85a5-7d1bcf73836b/openstack-network-exporter/0.log" Dec 06 06:29:57 crc kubenswrapper[4706]: I1206 06:29:57.714763 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_c330d787-77c8-4014-85a5-7d1bcf73836b/ovsdbserver-nb/0.log" Dec 06 06:29:57 crc kubenswrapper[4706]: I1206 06:29:57.911537 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad/openstack-network-exporter/0.log" Dec 06 06:29:57 crc kubenswrapper[4706]: I1206 06:29:57.943926 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad/ovsdbserver-sb/0.log" Dec 06 06:29:58 crc kubenswrapper[4706]: I1206 06:29:58.184788 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-789868f976-vz5nh_8507b27e-a504-499e-bfea-e8c0397ff528/placement-api/0.log" Dec 06 06:29:58 crc kubenswrapper[4706]: I1206 06:29:58.215823 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-789868f976-vz5nh_8507b27e-a504-499e-bfea-e8c0397ff528/placement-log/0.log" Dec 06 06:29:58 crc kubenswrapper[4706]: I1206 06:29:58.247335 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_965d89e8-6db9-49d7-b516-ee4039b050eb/setup-container/0.log" Dec 06 06:29:58 crc kubenswrapper[4706]: I1206 06:29:58.504925 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_6be686b8-8844-4721-8b68-cd8b4d338517/setup-container/0.log" Dec 06 06:29:58 crc kubenswrapper[4706]: I1206 06:29:58.528621 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_965d89e8-6db9-49d7-b516-ee4039b050eb/setup-container/0.log" Dec 06 06:29:58 crc kubenswrapper[4706]: I1206 06:29:58.530103 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_965d89e8-6db9-49d7-b516-ee4039b050eb/rabbitmq/0.log" Dec 06 06:29:58 crc kubenswrapper[4706]: I1206 06:29:58.777861 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_6be686b8-8844-4721-8b68-cd8b4d338517/setup-container/0.log" Dec 06 06:29:58 crc kubenswrapper[4706]: I1206 06:29:58.826792 4706 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack_rabbitmq-server-0_6be686b8-8844-4721-8b68-cd8b4d338517/rabbitmq/0.log" Dec 06 06:29:58 crc kubenswrapper[4706]: I1206 06:29:58.864468 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5_5d6e830f-730f-43e2-8218-e247e8a663df/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 06 06:29:59 crc kubenswrapper[4706]: I1206 06:29:59.106726 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-dpmdb_0df1eee4-ea9f-4409-b17c-8b6b37985814/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Dec 06 06:29:59 crc kubenswrapper[4706]: I1206 06:29:59.130295 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75_7051aff0-e824-43eb-a501-3c02108f96ee/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Dec 06 06:29:59 crc kubenswrapper[4706]: I1206 06:29:59.367346 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-gf4l9_fe026aef-fa96-451a-b38d-de4406116ea7/ssh-known-hosts-edpm-deployment/0.log" Dec 06 06:29:59 crc kubenswrapper[4706]: I1206 06:29:59.376222 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-6bt4j_70676b1a-d6a7-4b05-b15a-fa2661a1a77b/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Dec 06 06:29:59 crc kubenswrapper[4706]: I1206 06:29:59.600880 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-7f666db4c-wsc2b_38ce5378-a514-4454-8f74-73226df682e6/proxy-server/0.log" Dec 06 06:29:59 crc kubenswrapper[4706]: I1206 06:29:59.709934 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-7f666db4c-wsc2b_38ce5378-a514-4454-8f74-73226df682e6/proxy-httpd/0.log" Dec 06 06:29:59 crc kubenswrapper[4706]: I1206 06:29:59.784104 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-9pw6t_abd1400e-de80-48fe-bad4-3e3c3af98355/swift-ring-rebalance/0.log" Dec 06 06:29:59 crc kubenswrapper[4706]: I1206 06:29:59.909239 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/account-auditor/0.log" Dec 06 06:29:59 crc kubenswrapper[4706]: I1206 06:29:59.957751 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/account-reaper/0.log" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.099769 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/account-replicator/0.log" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.128380 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/account-server/0.log" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.177075 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/container-auditor/0.log" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.191313 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6"] Dec 06 06:30:00 crc kubenswrapper[4706]: E1206 06:30:00.191709 4706 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="390cc982-f29d-4ace-b23c-0c77b0bbc87e" containerName="extract-content" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.191725 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="390cc982-f29d-4ace-b23c-0c77b0bbc87e" containerName="extract-content" Dec 06 06:30:00 crc kubenswrapper[4706]: E1206 06:30:00.191737 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5fc6d73-f4ef-4f6f-8e6b-545112082808" containerName="container-00" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.191743 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5fc6d73-f4ef-4f6f-8e6b-545112082808" containerName="container-00" Dec 06 06:30:00 crc kubenswrapper[4706]: E1206 06:30:00.191754 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="390cc982-f29d-4ace-b23c-0c77b0bbc87e" containerName="registry-server" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.191760 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="390cc982-f29d-4ace-b23c-0c77b0bbc87e" containerName="registry-server" Dec 06 06:30:00 crc kubenswrapper[4706]: E1206 06:30:00.191774 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="390cc982-f29d-4ace-b23c-0c77b0bbc87e" containerName="extract-utilities" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.191780 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="390cc982-f29d-4ace-b23c-0c77b0bbc87e" containerName="extract-utilities" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.191966 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5fc6d73-f4ef-4f6f-8e6b-545112082808" containerName="container-00" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.191983 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="390cc982-f29d-4ace-b23c-0c77b0bbc87e" containerName="registry-server" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.192633 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.197187 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.198646 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.234554 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6"] Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.261513 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/container-replicator/0.log" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.305989 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c4ea269f-188c-40ff-9b61-e10d2e7e7d3d-secret-volume\") pod \"collect-profiles-29416710-ddmg6\" (UID: \"c4ea269f-188c-40ff-9b61-e10d2e7e7d3d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.306191 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5rvz\" (UniqueName: \"kubernetes.io/projected/c4ea269f-188c-40ff-9b61-e10d2e7e7d3d-kube-api-access-z5rvz\") pod \"collect-profiles-29416710-ddmg6\" (UID: \"c4ea269f-188c-40ff-9b61-e10d2e7e7d3d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.306223 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c4ea269f-188c-40ff-9b61-e10d2e7e7d3d-config-volume\") pod \"collect-profiles-29416710-ddmg6\" (UID: \"c4ea269f-188c-40ff-9b61-e10d2e7e7d3d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.331766 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/container-server/0.log" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.364915 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/container-updater/0.log" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.406024 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/object-auditor/0.log" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.407582 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c4ea269f-188c-40ff-9b61-e10d2e7e7d3d-secret-volume\") pod \"collect-profiles-29416710-ddmg6\" (UID: \"c4ea269f-188c-40ff-9b61-e10d2e7e7d3d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.407690 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5rvz\" (UniqueName: 
\"kubernetes.io/projected/c4ea269f-188c-40ff-9b61-e10d2e7e7d3d-kube-api-access-z5rvz\") pod \"collect-profiles-29416710-ddmg6\" (UID: \"c4ea269f-188c-40ff-9b61-e10d2e7e7d3d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.407718 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c4ea269f-188c-40ff-9b61-e10d2e7e7d3d-config-volume\") pod \"collect-profiles-29416710-ddmg6\" (UID: \"c4ea269f-188c-40ff-9b61-e10d2e7e7d3d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.408791 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c4ea269f-188c-40ff-9b61-e10d2e7e7d3d-config-volume\") pod \"collect-profiles-29416710-ddmg6\" (UID: \"c4ea269f-188c-40ff-9b61-e10d2e7e7d3d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.416820 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c4ea269f-188c-40ff-9b61-e10d2e7e7d3d-secret-volume\") pod \"collect-profiles-29416710-ddmg6\" (UID: \"c4ea269f-188c-40ff-9b61-e10d2e7e7d3d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.433987 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z5rvz\" (UniqueName: \"kubernetes.io/projected/c4ea269f-188c-40ff-9b61-e10d2e7e7d3d-kube-api-access-z5rvz\") pod \"collect-profiles-29416710-ddmg6\" (UID: \"c4ea269f-188c-40ff-9b61-e10d2e7e7d3d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.477082 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/object-expirer/0.log" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.532206 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.710872 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/object-replicator/0.log" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.759768 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/object-server/0.log" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.772296 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/object-updater/0.log" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.811932 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/rsync/0.log" Dec 06 06:30:00 crc kubenswrapper[4706]: I1206 06:30:00.934937 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/swift-recon-cron/0.log" Dec 06 06:30:01 crc kubenswrapper[4706]: I1206 06:30:01.038524 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6"] Dec 06 06:30:01 crc kubenswrapper[4706]: I1206 06:30:01.134249 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb_19fbc54f-2695-4d41-9221-c5d2731510c1/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Dec 06 06:30:01 crc kubenswrapper[4706]: I1206 06:30:01.327093 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6" event={"ID":"c4ea269f-188c-40ff-9b61-e10d2e7e7d3d","Type":"ContainerStarted","Data":"94ba27b2af7454e7b276f8c70464432bd62a92fad9fd7960ebfb930857b9fa65"} Dec 06 06:30:01 crc kubenswrapper[4706]: I1206 06:30:01.327147 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6" event={"ID":"c4ea269f-188c-40ff-9b61-e10d2e7e7d3d","Type":"ContainerStarted","Data":"fbcff70fad49926e351178f904c717557ada2386bef518da0076eedac6f384d8"} Dec 06 06:30:01 crc kubenswrapper[4706]: I1206 06:30:01.349811 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6" podStartSLOduration=1.349789594 podStartE2EDuration="1.349789594s" podCreationTimestamp="2025-12-06 06:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 06:30:01.346263488 +0000 UTC m=+4223.674087432" watchObservedRunningTime="2025-12-06 06:30:01.349789594 +0000 UTC m=+4223.677613548" Dec 06 06:30:01 crc kubenswrapper[4706]: I1206 06:30:01.378157 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_53ac9b54-4c61-4101-96d0-c247c09c0cdd/tempest-tests-tempest-tests-runner/0.log" Dec 06 06:30:01 crc kubenswrapper[4706]: I1206 06:30:01.560549 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_5711989a-45c2-4c7f-b728-3d5c0eb851a6/test-operator-logs-container/0.log" Dec 06 06:30:01 crc kubenswrapper[4706]: I1206 06:30:01.598105 4706 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq_6c4f877c-27aa-40eb-b5ff-2968f748a978/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Dec 06 06:30:02 crc kubenswrapper[4706]: I1206 06:30:02.211547 4706 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod6c767422-854e-45fd-a5a2-d7d061c54829"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod6c767422-854e-45fd-a5a2-d7d061c54829] : Timed out while waiting for systemd to remove kubepods-besteffort-pod6c767422_854e_45fd_a5a2_d7d061c54829.slice" Dec 06 06:30:02 crc kubenswrapper[4706]: E1206 06:30:02.211902 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod6c767422-854e-45fd-a5a2-d7d061c54829] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod6c767422-854e-45fd-a5a2-d7d061c54829] : Timed out while waiting for systemd to remove kubepods-besteffort-pod6c767422_854e_45fd_a5a2_d7d061c54829.slice" pod="openshift-must-gather-pbc2v/crc-debug-c5wpj" podUID="6c767422-854e-45fd-a5a2-d7d061c54829" Dec 06 06:30:02 crc kubenswrapper[4706]: I1206 06:30:02.339426 4706 generic.go:334] "Generic (PLEG): container finished" podID="c4ea269f-188c-40ff-9b61-e10d2e7e7d3d" containerID="94ba27b2af7454e7b276f8c70464432bd62a92fad9fd7960ebfb930857b9fa65" exitCode=0 Dec 06 06:30:02 crc kubenswrapper[4706]: I1206 06:30:02.339504 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pbc2v/crc-debug-c5wpj" Dec 06 06:30:02 crc kubenswrapper[4706]: I1206 06:30:02.340193 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6" event={"ID":"c4ea269f-188c-40ff-9b61-e10d2e7e7d3d","Type":"ContainerDied","Data":"94ba27b2af7454e7b276f8c70464432bd62a92fad9fd7960ebfb930857b9fa65"} Dec 06 06:30:03 crc kubenswrapper[4706]: I1206 06:30:03.779504 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6" Dec 06 06:30:03 crc kubenswrapper[4706]: I1206 06:30:03.807669 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z5rvz\" (UniqueName: \"kubernetes.io/projected/c4ea269f-188c-40ff-9b61-e10d2e7e7d3d-kube-api-access-z5rvz\") pod \"c4ea269f-188c-40ff-9b61-e10d2e7e7d3d\" (UID: \"c4ea269f-188c-40ff-9b61-e10d2e7e7d3d\") " Dec 06 06:30:03 crc kubenswrapper[4706]: I1206 06:30:03.807844 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c4ea269f-188c-40ff-9b61-e10d2e7e7d3d-secret-volume\") pod \"c4ea269f-188c-40ff-9b61-e10d2e7e7d3d\" (UID: \"c4ea269f-188c-40ff-9b61-e10d2e7e7d3d\") " Dec 06 06:30:03 crc kubenswrapper[4706]: I1206 06:30:03.807890 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c4ea269f-188c-40ff-9b61-e10d2e7e7d3d-config-volume\") pod \"c4ea269f-188c-40ff-9b61-e10d2e7e7d3d\" (UID: \"c4ea269f-188c-40ff-9b61-e10d2e7e7d3d\") " Dec 06 06:30:03 crc kubenswrapper[4706]: I1206 06:30:03.808902 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c4ea269f-188c-40ff-9b61-e10d2e7e7d3d-config-volume" (OuterVolumeSpecName: "config-volume") pod "c4ea269f-188c-40ff-9b61-e10d2e7e7d3d" (UID: "c4ea269f-188c-40ff-9b61-e10d2e7e7d3d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 06:30:03 crc kubenswrapper[4706]: I1206 06:30:03.816434 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c4ea269f-188c-40ff-9b61-e10d2e7e7d3d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "c4ea269f-188c-40ff-9b61-e10d2e7e7d3d" (UID: "c4ea269f-188c-40ff-9b61-e10d2e7e7d3d"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:30:03 crc kubenswrapper[4706]: I1206 06:30:03.846210 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c4ea269f-188c-40ff-9b61-e10d2e7e7d3d-kube-api-access-z5rvz" (OuterVolumeSpecName: "kube-api-access-z5rvz") pod "c4ea269f-188c-40ff-9b61-e10d2e7e7d3d" (UID: "c4ea269f-188c-40ff-9b61-e10d2e7e7d3d"). InnerVolumeSpecName "kube-api-access-z5rvz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:30:03 crc kubenswrapper[4706]: I1206 06:30:03.909251 4706 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c4ea269f-188c-40ff-9b61-e10d2e7e7d3d-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 06 06:30:03 crc kubenswrapper[4706]: I1206 06:30:03.909515 4706 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c4ea269f-188c-40ff-9b61-e10d2e7e7d3d-config-volume\") on node \"crc\" DevicePath \"\"" Dec 06 06:30:03 crc kubenswrapper[4706]: I1206 06:30:03.909612 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z5rvz\" (UniqueName: \"kubernetes.io/projected/c4ea269f-188c-40ff-9b61-e10d2e7e7d3d-kube-api-access-z5rvz\") on node \"crc\" DevicePath \"\"" Dec 06 06:30:04 crc kubenswrapper[4706]: I1206 06:30:04.358722 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6" event={"ID":"c4ea269f-188c-40ff-9b61-e10d2e7e7d3d","Type":"ContainerDied","Data":"fbcff70fad49926e351178f904c717557ada2386bef518da0076eedac6f384d8"} Dec 06 06:30:04 crc kubenswrapper[4706]: I1206 06:30:04.358774 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fbcff70fad49926e351178f904c717557ada2386bef518da0076eedac6f384d8" Dec 06 06:30:04 crc kubenswrapper[4706]: I1206 06:30:04.358845 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416710-ddmg6" Dec 06 06:30:04 crc kubenswrapper[4706]: I1206 06:30:04.420337 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw"] Dec 06 06:30:04 crc kubenswrapper[4706]: I1206 06:30:04.429071 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416665-vplcw"] Dec 06 06:30:06 crc kubenswrapper[4706]: I1206 06:30:06.050868 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f9e636a-ddae-4169-b6a0-f00f304bbeaa" path="/var/lib/kubelet/pods/8f9e636a-ddae-4169-b6a0-f00f304bbeaa/volumes" Dec 06 06:30:09 crc kubenswrapper[4706]: I1206 06:30:09.122335 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf/memcached/0.log" Dec 06 06:30:12 crc kubenswrapper[4706]: I1206 06:30:12.636489 4706 scope.go:117] "RemoveContainer" containerID="a465b7a27d5d5e76fdba5a76ce099b0fddfa69b0d9895eed51f0c98288086d5d" Dec 06 06:30:26 crc kubenswrapper[4706]: I1206 06:30:26.595290 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv_2add93a7-b496-4008-b764-b43a05be4967/util/0.log" Dec 06 06:30:26 crc kubenswrapper[4706]: I1206 06:30:26.729411 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv_2add93a7-b496-4008-b764-b43a05be4967/pull/0.log" Dec 06 06:30:26 crc kubenswrapper[4706]: I1206 06:30:26.740586 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv_2add93a7-b496-4008-b764-b43a05be4967/util/0.log" Dec 06 06:30:26 crc kubenswrapper[4706]: I1206 06:30:26.783224 4706 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv_2add93a7-b496-4008-b764-b43a05be4967/pull/0.log" Dec 06 06:30:26 crc kubenswrapper[4706]: I1206 06:30:26.892313 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv_2add93a7-b496-4008-b764-b43a05be4967/pull/0.log" Dec 06 06:30:26 crc kubenswrapper[4706]: I1206 06:30:26.922708 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv_2add93a7-b496-4008-b764-b43a05be4967/util/0.log" Dec 06 06:30:26 crc kubenswrapper[4706]: I1206 06:30:26.930458 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv_2add93a7-b496-4008-b764-b43a05be4967/extract/0.log" Dec 06 06:30:27 crc kubenswrapper[4706]: I1206 06:30:27.089954 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-mpkjv_31b78248-5727-4a30-95ab-d75acc5a752b/kube-rbac-proxy/0.log" Dec 06 06:30:27 crc kubenswrapper[4706]: I1206 06:30:27.204202 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-mpkjv_31b78248-5727-4a30-95ab-d75acc5a752b/manager/0.log" Dec 06 06:30:27 crc kubenswrapper[4706]: I1206 06:30:27.211993 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-859b6ccc6-msm2n_9e547dc3-41db-48ab-b791-885c0f98f4c8/kube-rbac-proxy/0.log" Dec 06 06:30:27 crc kubenswrapper[4706]: I1206 06:30:27.349718 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-859b6ccc6-msm2n_9e547dc3-41db-48ab-b791-885c0f98f4c8/manager/0.log" Dec 06 06:30:27 crc kubenswrapper[4706]: I1206 06:30:27.373517 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-78b4bc895b-wzlpz_74049eb3-6721-4234-80cd-01b530d2d9e5/kube-rbac-proxy/0.log" Dec 06 06:30:27 crc kubenswrapper[4706]: I1206 06:30:27.416694 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-78b4bc895b-wzlpz_74049eb3-6721-4234-80cd-01b530d2d9e5/manager/0.log" Dec 06 06:30:27 crc kubenswrapper[4706]: I1206 06:30:27.583234 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-77987cd8cd-vm2sj_de139c22-08fa-4b45-abda-af9394c16eac/kube-rbac-proxy/0.log" Dec 06 06:30:27 crc kubenswrapper[4706]: I1206 06:30:27.591354 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-77987cd8cd-vm2sj_de139c22-08fa-4b45-abda-af9394c16eac/manager/0.log" Dec 06 06:30:27 crc kubenswrapper[4706]: I1206 06:30:27.761239 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-qr75r_646d8bbb-f505-42f9-a23d-15b999c5acce/kube-rbac-proxy/0.log" Dec 06 06:30:27 crc kubenswrapper[4706]: I1206 06:30:27.763008 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-qr75r_646d8bbb-f505-42f9-a23d-15b999c5acce/manager/0.log" Dec 06 06:30:27 crc kubenswrapper[4706]: I1206 06:30:27.798376 
4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-fcp7z_b6524ab6-7d15-4cf4-b3b2-dc9f0d014930/kube-rbac-proxy/0.log" Dec 06 06:30:27 crc kubenswrapper[4706]: I1206 06:30:27.937588 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-fcp7z_b6524ab6-7d15-4cf4-b3b2-dc9f0d014930/manager/0.log" Dec 06 06:30:27 crc kubenswrapper[4706]: I1206 06:30:27.962521 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-x7wwl_0e17be2a-d936-4d91-862a-b92014212bf6/kube-rbac-proxy/0.log" Dec 06 06:30:28 crc kubenswrapper[4706]: I1206 06:30:28.158204 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-6c548fd776-jspvh_eacc98a4-22bf-4a38-8de0-2bf6fd395572/kube-rbac-proxy/0.log" Dec 06 06:30:28 crc kubenswrapper[4706]: I1206 06:30:28.166673 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-x7wwl_0e17be2a-d936-4d91-862a-b92014212bf6/manager/0.log" Dec 06 06:30:28 crc kubenswrapper[4706]: I1206 06:30:28.226436 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-6c548fd776-jspvh_eacc98a4-22bf-4a38-8de0-2bf6fd395572/manager/0.log" Dec 06 06:30:28 crc kubenswrapper[4706]: I1206 06:30:28.318858 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7765d96ddf-jvwv2_34163fc1-16c7-4942-9eda-5afb77180d00/kube-rbac-proxy/0.log" Dec 06 06:30:28 crc kubenswrapper[4706]: I1206 06:30:28.388683 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7765d96ddf-jvwv2_34163fc1-16c7-4942-9eda-5afb77180d00/manager/0.log" Dec 06 06:30:28 crc kubenswrapper[4706]: I1206 06:30:28.498076 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-7c79b5df47-xctf2_b67589f2-8ee8-43a3-aaf9-e1767c0a75c5/kube-rbac-proxy/0.log" Dec 06 06:30:28 crc kubenswrapper[4706]: I1206 06:30:28.540618 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-7c79b5df47-xctf2_b67589f2-8ee8-43a3-aaf9-e1767c0a75c5/manager/0.log" Dec 06 06:30:28 crc kubenswrapper[4706]: I1206 06:30:28.626078 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-56bbcc9d85-nhzq9_5f25d928-9f7a-4d1b-b1bb-abc58dad2080/kube-rbac-proxy/0.log" Dec 06 06:30:28 crc kubenswrapper[4706]: I1206 06:30:28.739704 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-56bbcc9d85-nhzq9_5f25d928-9f7a-4d1b-b1bb-abc58dad2080/manager/0.log" Dec 06 06:30:28 crc kubenswrapper[4706]: I1206 06:30:28.786447 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-bkvhv_d28af7d8-b64b-48f1-9ac1-7f1cfc361751/kube-rbac-proxy/0.log" Dec 06 06:30:28 crc kubenswrapper[4706]: I1206 06:30:28.865652 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-bkvhv_d28af7d8-b64b-48f1-9ac1-7f1cfc361751/manager/0.log" Dec 06 06:30:28 crc 
kubenswrapper[4706]: I1206 06:30:28.917320 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-qfpfj_0928e1f4-7912-465f-a991-9d0dda0a42d1/kube-rbac-proxy/0.log" Dec 06 06:30:29 crc kubenswrapper[4706]: I1206 06:30:29.061762 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-qfpfj_0928e1f4-7912-465f-a991-9d0dda0a42d1/manager/0.log" Dec 06 06:30:29 crc kubenswrapper[4706]: I1206 06:30:29.099518 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-k5hqn_b980759b-88cf-47ee-b7b0-12ebaddba6cd/kube-rbac-proxy/0.log" Dec 06 06:30:29 crc kubenswrapper[4706]: I1206 06:30:29.180219 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-k5hqn_b980759b-88cf-47ee-b7b0-12ebaddba6cd/manager/0.log" Dec 06 06:30:29 crc kubenswrapper[4706]: I1206 06:30:29.229417 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-64bc77cfd455bk5_09479c44-e706-4f72-a1f3-6b71d4b29f0b/kube-rbac-proxy/0.log" Dec 06 06:30:29 crc kubenswrapper[4706]: I1206 06:30:29.317774 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-64bc77cfd455bk5_09479c44-e706-4f72-a1f3-6b71d4b29f0b/manager/0.log" Dec 06 06:30:29 crc kubenswrapper[4706]: I1206 06:30:29.741166 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-8stsm_c7986937-a648-4cc0-89ae-e718dcccffad/registry-server/0.log" Dec 06 06:30:29 crc kubenswrapper[4706]: I1206 06:30:29.774638 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5964599cfc-xxv5r_235972bf-6d17-4167-b41f-98483ea3f1ba/operator/0.log" Dec 06 06:30:30 crc kubenswrapper[4706]: I1206 06:30:30.012173 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-q9dk8_9914167a-34c0-42fc-ac0c-af6f866b437f/kube-rbac-proxy/0.log" Dec 06 06:30:30 crc kubenswrapper[4706]: I1206 06:30:30.126686 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-q9dk8_9914167a-34c0-42fc-ac0c-af6f866b437f/manager/0.log" Dec 06 06:30:30 crc kubenswrapper[4706]: I1206 06:30:30.268237 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-gz7v6_47a5741f-61c5-4de3-b020-50c25f0570f2/kube-rbac-proxy/0.log" Dec 06 06:30:30 crc kubenswrapper[4706]: I1206 06:30:30.441548 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-gz7v6_47a5741f-61c5-4de3-b020-50c25f0570f2/manager/0.log" Dec 06 06:30:30 crc kubenswrapper[4706]: I1206 06:30:30.598127 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-pmfhv_2d6df005-5a24-47f7-a1a2-a30e6b8ab9fb/operator/0.log" Dec 06 06:30:30 crc kubenswrapper[4706]: I1206 06:30:30.628712 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7f6f47b7b7-lmnn4_36973f56-f6d5-4a12-b86e-4ad7bcb3df6f/manager/0.log" Dec 06 
06:30:30 crc kubenswrapper[4706]: I1206 06:30:30.659972 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f8c65bbfc-jc6r8_73d3329e-7a93-4d32-b7ba-0d5d6b468432/kube-rbac-proxy/0.log" Dec 06 06:30:30 crc kubenswrapper[4706]: I1206 06:30:30.697349 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f8c65bbfc-jc6r8_73d3329e-7a93-4d32-b7ba-0d5d6b468432/manager/0.log" Dec 06 06:30:30 crc kubenswrapper[4706]: I1206 06:30:30.781853 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-lpjp5_bfd8649f-6345-40be-9193-e80b2ce0c1dc/kube-rbac-proxy/0.log" Dec 06 06:30:30 crc kubenswrapper[4706]: I1206 06:30:30.898612 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-lpjp5_bfd8649f-6345-40be-9193-e80b2ce0c1dc/manager/0.log" Dec 06 06:30:30 crc kubenswrapper[4706]: I1206 06:30:30.903551 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-qmnhr_ff8a3a6e-0623-417c-8e02-f16f34e3bfe9/kube-rbac-proxy/0.log" Dec 06 06:30:30 crc kubenswrapper[4706]: I1206 06:30:30.965072 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-qmnhr_ff8a3a6e-0623-417c-8e02-f16f34e3bfe9/manager/0.log" Dec 06 06:30:31 crc kubenswrapper[4706]: I1206 06:30:31.019549 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-769dc69bc-tx6k9_1d93b83c-6e45-44bf-b9b1-d6163c85d6b1/kube-rbac-proxy/0.log" Dec 06 06:30:31 crc kubenswrapper[4706]: I1206 06:30:31.064396 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-769dc69bc-tx6k9_1d93b83c-6e45-44bf-b9b1-d6163c85d6b1/manager/0.log" Dec 06 06:30:49 crc kubenswrapper[4706]: I1206 06:30:49.447588 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-wdsds_a417f08a-e64f-4a02-abb3-bee2049eb2e7/control-plane-machine-set-operator/0.log" Dec 06 06:30:49 crc kubenswrapper[4706]: I1206 06:30:49.636757 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-5g2s4_4230f0fb-f05e-4ae6-9755-db33865a6c33/machine-api-operator/0.log" Dec 06 06:30:49 crc kubenswrapper[4706]: I1206 06:30:49.641857 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-5g2s4_4230f0fb-f05e-4ae6-9755-db33865a6c33/kube-rbac-proxy/0.log" Dec 06 06:31:02 crc kubenswrapper[4706]: I1206 06:31:02.000212 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-pf8gt_44e622ec-7780-489c-bcf0-575ec84dc213/cert-manager-controller/0.log" Dec 06 06:31:02 crc kubenswrapper[4706]: I1206 06:31:02.175069 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-fbpqk_58b45d75-86f1-4092-89ba-a1f924030512/cert-manager-cainjector/0.log" Dec 06 06:31:02 crc kubenswrapper[4706]: I1206 06:31:02.206027 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-22w82_052717cc-1d2a-4e9a-a6a3-897c1d529b1e/cert-manager-webhook/0.log" Dec 06 
06:31:14 crc kubenswrapper[4706]: I1206 06:31:14.567954 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-8c75x_1d9e9551-a46a-42b6-a9b4-b78a3994239a/nmstate-console-plugin/0.log" Dec 06 06:31:14 crc kubenswrapper[4706]: I1206 06:31:14.780745 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-l4lvf_ab718c3d-1427-4fc0-b728-6925fca42caf/nmstate-handler/0.log" Dec 06 06:31:14 crc kubenswrapper[4706]: I1206 06:31:14.814380 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-5jtbm_eeea5f87-d6ea-47d3-86aa-4e5ed4562078/kube-rbac-proxy/0.log" Dec 06 06:31:14 crc kubenswrapper[4706]: I1206 06:31:14.814678 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-5jtbm_eeea5f87-d6ea-47d3-86aa-4e5ed4562078/nmstate-metrics/0.log" Dec 06 06:31:15 crc kubenswrapper[4706]: I1206 06:31:15.032293 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-xpljb_77c327d8-4531-43a9-991e-f913f7e1d02e/nmstate-operator/0.log" Dec 06 06:31:15 crc kubenswrapper[4706]: I1206 06:31:15.068252 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-r55tp_aa5bcff8-fac7-4a00-b7f7-312f70ad11b2/nmstate-webhook/0.log" Dec 06 06:31:28 crc kubenswrapper[4706]: I1206 06:31:28.547877 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-5gg6v_7009f978-2926-401b-bb27-4378dac2d69a/kube-rbac-proxy/0.log" Dec 06 06:31:28 crc kubenswrapper[4706]: I1206 06:31:28.688121 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-5gg6v_7009f978-2926-401b-bb27-4378dac2d69a/controller/0.log" Dec 06 06:31:28 crc kubenswrapper[4706]: I1206 06:31:28.784631 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-frr-files/0.log" Dec 06 06:31:28 crc kubenswrapper[4706]: I1206 06:31:28.934878 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-reloader/0.log" Dec 06 06:31:28 crc kubenswrapper[4706]: I1206 06:31:28.948166 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-frr-files/0.log" Dec 06 06:31:28 crc kubenswrapper[4706]: I1206 06:31:28.969024 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-metrics/0.log" Dec 06 06:31:29 crc kubenswrapper[4706]: I1206 06:31:29.003232 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-reloader/0.log" Dec 06 06:31:29 crc kubenswrapper[4706]: I1206 06:31:29.135807 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-frr-files/0.log" Dec 06 06:31:29 crc kubenswrapper[4706]: I1206 06:31:29.157460 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-metrics/0.log" Dec 06 06:31:29 crc kubenswrapper[4706]: I1206 06:31:29.170722 4706 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-reloader/0.log" Dec 06 06:31:29 crc kubenswrapper[4706]: I1206 06:31:29.201641 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-metrics/0.log" Dec 06 06:31:29 crc kubenswrapper[4706]: I1206 06:31:29.366240 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-reloader/0.log" Dec 06 06:31:29 crc kubenswrapper[4706]: I1206 06:31:29.373221 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-frr-files/0.log" Dec 06 06:31:29 crc kubenswrapper[4706]: I1206 06:31:29.376325 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/controller/0.log" Dec 06 06:31:29 crc kubenswrapper[4706]: I1206 06:31:29.379468 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-metrics/0.log" Dec 06 06:31:29 crc kubenswrapper[4706]: I1206 06:31:29.576973 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/kube-rbac-proxy/0.log" Dec 06 06:31:29 crc kubenswrapper[4706]: I1206 06:31:29.580460 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/kube-rbac-proxy-frr/0.log" Dec 06 06:31:29 crc kubenswrapper[4706]: I1206 06:31:29.606336 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/frr-metrics/0.log" Dec 06 06:31:29 crc kubenswrapper[4706]: I1206 06:31:29.790075 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/reloader/0.log" Dec 06 06:31:29 crc kubenswrapper[4706]: I1206 06:31:29.853136 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-hwzgm_90735168-5b70-4282-9d00-6ca91facf758/frr-k8s-webhook-server/0.log" Dec 06 06:31:30 crc kubenswrapper[4706]: I1206 06:31:30.060771 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-67f666fcfb-5vg8w_86959832-935a-46cc-85bc-f0b9b39340a7/manager/0.log" Dec 06 06:31:30 crc kubenswrapper[4706]: I1206 06:31:30.292818 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-d44d656bf-lksks_2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726/webhook-server/0.log" Dec 06 06:31:30 crc kubenswrapper[4706]: I1206 06:31:30.747479 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/frr/0.log" Dec 06 06:31:30 crc kubenswrapper[4706]: I1206 06:31:30.764006 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-snzn5_6d8b765c-bd65-44fb-a959-b458e0c531a4/kube-rbac-proxy/0.log" Dec 06 06:31:31 crc kubenswrapper[4706]: I1206 06:31:31.093372 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-snzn5_6d8b765c-bd65-44fb-a959-b458e0c531a4/speaker/0.log" Dec 06 06:31:35 crc kubenswrapper[4706]: I1206 06:31:35.961252 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn 
container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 06:31:35 crc kubenswrapper[4706]: I1206 06:31:35.961794 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 06:31:43 crc kubenswrapper[4706]: I1206 06:31:43.089293 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm_04c31578-f89b-4b78-86fb-7809b9fa2a21/util/0.log" Dec 06 06:31:43 crc kubenswrapper[4706]: I1206 06:31:43.256961 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm_04c31578-f89b-4b78-86fb-7809b9fa2a21/util/0.log" Dec 06 06:31:43 crc kubenswrapper[4706]: I1206 06:31:43.280840 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm_04c31578-f89b-4b78-86fb-7809b9fa2a21/pull/0.log" Dec 06 06:31:43 crc kubenswrapper[4706]: I1206 06:31:43.285213 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm_04c31578-f89b-4b78-86fb-7809b9fa2a21/pull/0.log" Dec 06 06:31:43 crc kubenswrapper[4706]: I1206 06:31:43.446632 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm_04c31578-f89b-4b78-86fb-7809b9fa2a21/util/0.log" Dec 06 06:31:43 crc kubenswrapper[4706]: I1206 06:31:43.497802 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm_04c31578-f89b-4b78-86fb-7809b9fa2a21/pull/0.log" Dec 06 06:31:43 crc kubenswrapper[4706]: I1206 06:31:43.505072 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm_04c31578-f89b-4b78-86fb-7809b9fa2a21/extract/0.log" Dec 06 06:31:43 crc kubenswrapper[4706]: I1206 06:31:43.628279 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6_09b72ef4-066a-4aea-ad04-27d8bca291b8/util/0.log" Dec 06 06:31:43 crc kubenswrapper[4706]: I1206 06:31:43.802105 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6_09b72ef4-066a-4aea-ad04-27d8bca291b8/util/0.log" Dec 06 06:31:43 crc kubenswrapper[4706]: I1206 06:31:43.810914 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6_09b72ef4-066a-4aea-ad04-27d8bca291b8/pull/0.log" Dec 06 06:31:43 crc kubenswrapper[4706]: I1206 06:31:43.819619 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6_09b72ef4-066a-4aea-ad04-27d8bca291b8/pull/0.log" Dec 06 06:31:43 crc kubenswrapper[4706]: I1206 
06:31:43.963113 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6_09b72ef4-066a-4aea-ad04-27d8bca291b8/util/0.log" Dec 06 06:31:43 crc kubenswrapper[4706]: I1206 06:31:43.989007 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6_09b72ef4-066a-4aea-ad04-27d8bca291b8/pull/0.log" Dec 06 06:31:44 crc kubenswrapper[4706]: I1206 06:31:44.005326 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6_09b72ef4-066a-4aea-ad04-27d8bca291b8/extract/0.log" Dec 06 06:31:44 crc kubenswrapper[4706]: I1206 06:31:44.122601 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lvmnq_a4c7cd15-784b-4201-b0e2-f463f15e9bf6/extract-utilities/0.log" Dec 06 06:31:44 crc kubenswrapper[4706]: I1206 06:31:44.267208 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lvmnq_a4c7cd15-784b-4201-b0e2-f463f15e9bf6/extract-utilities/0.log" Dec 06 06:31:44 crc kubenswrapper[4706]: I1206 06:31:44.324468 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lvmnq_a4c7cd15-784b-4201-b0e2-f463f15e9bf6/extract-content/0.log" Dec 06 06:31:44 crc kubenswrapper[4706]: I1206 06:31:44.326087 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lvmnq_a4c7cd15-784b-4201-b0e2-f463f15e9bf6/extract-content/0.log" Dec 06 06:31:44 crc kubenswrapper[4706]: I1206 06:31:44.477480 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lvmnq_a4c7cd15-784b-4201-b0e2-f463f15e9bf6/extract-utilities/0.log" Dec 06 06:31:44 crc kubenswrapper[4706]: I1206 06:31:44.498593 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lvmnq_a4c7cd15-784b-4201-b0e2-f463f15e9bf6/extract-content/0.log" Dec 06 06:31:44 crc kubenswrapper[4706]: I1206 06:31:44.754268 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5nx52_4bf72222-ee0b-41a2-877e-1bd5c83b392a/extract-utilities/0.log" Dec 06 06:31:44 crc kubenswrapper[4706]: I1206 06:31:44.901121 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5nx52_4bf72222-ee0b-41a2-877e-1bd5c83b392a/extract-utilities/0.log" Dec 06 06:31:44 crc kubenswrapper[4706]: I1206 06:31:44.981292 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5nx52_4bf72222-ee0b-41a2-877e-1bd5c83b392a/extract-content/0.log" Dec 06 06:31:45 crc kubenswrapper[4706]: I1206 06:31:45.053295 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5nx52_4bf72222-ee0b-41a2-877e-1bd5c83b392a/extract-content/0.log" Dec 06 06:31:45 crc kubenswrapper[4706]: I1206 06:31:45.144665 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lvmnq_a4c7cd15-784b-4201-b0e2-f463f15e9bf6/registry-server/0.log" Dec 06 06:31:45 crc kubenswrapper[4706]: I1206 06:31:45.199840 4706 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-5nx52_4bf72222-ee0b-41a2-877e-1bd5c83b392a/extract-content/0.log" Dec 06 06:31:45 crc kubenswrapper[4706]: I1206 06:31:45.200370 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5nx52_4bf72222-ee0b-41a2-877e-1bd5c83b392a/extract-utilities/0.log" Dec 06 06:31:45 crc kubenswrapper[4706]: I1206 06:31:45.443376 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-9t9kd_03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d/marketplace-operator/0.log" Dec 06 06:31:45 crc kubenswrapper[4706]: I1206 06:31:45.662449 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-6qchr_fa0b57c0-e802-4273-99c5-43e1c8fd1887/extract-utilities/0.log" Dec 06 06:31:45 crc kubenswrapper[4706]: I1206 06:31:45.676374 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5nx52_4bf72222-ee0b-41a2-877e-1bd5c83b392a/registry-server/0.log" Dec 06 06:31:45 crc kubenswrapper[4706]: I1206 06:31:45.791476 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-6qchr_fa0b57c0-e802-4273-99c5-43e1c8fd1887/extract-utilities/0.log" Dec 06 06:31:45 crc kubenswrapper[4706]: I1206 06:31:45.812640 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-6qchr_fa0b57c0-e802-4273-99c5-43e1c8fd1887/extract-content/0.log" Dec 06 06:31:45 crc kubenswrapper[4706]: I1206 06:31:45.869453 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-6qchr_fa0b57c0-e802-4273-99c5-43e1c8fd1887/extract-content/0.log" Dec 06 06:31:45 crc kubenswrapper[4706]: I1206 06:31:45.994962 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-6qchr_fa0b57c0-e802-4273-99c5-43e1c8fd1887/extract-utilities/0.log" Dec 06 06:31:46 crc kubenswrapper[4706]: I1206 06:31:46.003917 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-6qchr_fa0b57c0-e802-4273-99c5-43e1c8fd1887/extract-content/0.log" Dec 06 06:31:46 crc kubenswrapper[4706]: I1206 06:31:46.197387 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-6qchr_fa0b57c0-e802-4273-99c5-43e1c8fd1887/registry-server/0.log" Dec 06 06:31:46 crc kubenswrapper[4706]: I1206 06:31:46.231127 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-jmcdp_50601575-3e02-451a-97c3-24b24683e5b8/extract-utilities/0.log" Dec 06 06:31:46 crc kubenswrapper[4706]: I1206 06:31:46.377013 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-jmcdp_50601575-3e02-451a-97c3-24b24683e5b8/extract-utilities/0.log" Dec 06 06:31:46 crc kubenswrapper[4706]: I1206 06:31:46.396486 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-jmcdp_50601575-3e02-451a-97c3-24b24683e5b8/extract-content/0.log" Dec 06 06:31:46 crc kubenswrapper[4706]: I1206 06:31:46.398819 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-jmcdp_50601575-3e02-451a-97c3-24b24683e5b8/extract-content/0.log" Dec 06 06:31:46 crc kubenswrapper[4706]: I1206 06:31:46.563456 4706 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-operators-jmcdp_50601575-3e02-451a-97c3-24b24683e5b8/extract-utilities/0.log" Dec 06 06:31:46 crc kubenswrapper[4706]: I1206 06:31:46.583435 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-jmcdp_50601575-3e02-451a-97c3-24b24683e5b8/extract-content/0.log" Dec 06 06:31:47 crc kubenswrapper[4706]: I1206 06:31:47.025884 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-jmcdp_50601575-3e02-451a-97c3-24b24683e5b8/registry-server/0.log" Dec 06 06:32:05 crc kubenswrapper[4706]: I1206 06:32:05.962081 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 06:32:05 crc kubenswrapper[4706]: I1206 06:32:05.962609 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 06:32:35 crc kubenswrapper[4706]: I1206 06:32:35.961945 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 06:32:35 crc kubenswrapper[4706]: I1206 06:32:35.962589 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 06:32:35 crc kubenswrapper[4706]: I1206 06:32:35.962643 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 06:32:35 crc kubenswrapper[4706]: I1206 06:32:35.963543 4706 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0712b5d6dd3cf50d5e8260c2d90bbd3bd3e7c8f2f055b870399a5e8978ffe708"} pod="openshift-machine-config-operator/machine-config-daemon-z27rn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 06 06:32:35 crc kubenswrapper[4706]: I1206 06:32:35.963604 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" containerID="cri-o://0712b5d6dd3cf50d5e8260c2d90bbd3bd3e7c8f2f055b870399a5e8978ffe708" gracePeriod=600 Dec 06 06:32:36 crc kubenswrapper[4706]: I1206 06:32:36.693311 4706 generic.go:334] "Generic (PLEG): container finished" podID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerID="0712b5d6dd3cf50d5e8260c2d90bbd3bd3e7c8f2f055b870399a5e8978ffe708" exitCode=0 Dec 06 06:32:36 crc kubenswrapper[4706]: I1206 06:32:36.693382 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerDied","Data":"0712b5d6dd3cf50d5e8260c2d90bbd3bd3e7c8f2f055b870399a5e8978ffe708"} Dec 06 06:32:36 crc kubenswrapper[4706]: I1206 06:32:36.693710 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff"} Dec 06 06:32:36 crc kubenswrapper[4706]: I1206 06:32:36.693738 4706 scope.go:117] "RemoveContainer" containerID="c051e6b81a01e13dcce9ef40f9de5f9239e5d1cef97636810b6b112f1d6d058f" Dec 06 06:33:09 crc kubenswrapper[4706]: I1206 06:33:09.128982 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-phppn"] Dec 06 06:33:09 crc kubenswrapper[4706]: E1206 06:33:09.130006 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c4ea269f-188c-40ff-9b61-e10d2e7e7d3d" containerName="collect-profiles" Dec 06 06:33:09 crc kubenswrapper[4706]: I1206 06:33:09.130023 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="c4ea269f-188c-40ff-9b61-e10d2e7e7d3d" containerName="collect-profiles" Dec 06 06:33:09 crc kubenswrapper[4706]: I1206 06:33:09.130238 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="c4ea269f-188c-40ff-9b61-e10d2e7e7d3d" containerName="collect-profiles" Dec 06 06:33:09 crc kubenswrapper[4706]: I1206 06:33:09.131674 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-phppn" Dec 06 06:33:09 crc kubenswrapper[4706]: I1206 06:33:09.140295 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-phppn"] Dec 06 06:33:09 crc kubenswrapper[4706]: I1206 06:33:09.189091 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e630efa-89b1-4b6a-9737-4d6cd07981e8-catalog-content\") pod \"redhat-operators-phppn\" (UID: \"2e630efa-89b1-4b6a-9737-4d6cd07981e8\") " pod="openshift-marketplace/redhat-operators-phppn" Dec 06 06:33:09 crc kubenswrapper[4706]: I1206 06:33:09.189258 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fk4nm\" (UniqueName: \"kubernetes.io/projected/2e630efa-89b1-4b6a-9737-4d6cd07981e8-kube-api-access-fk4nm\") pod \"redhat-operators-phppn\" (UID: \"2e630efa-89b1-4b6a-9737-4d6cd07981e8\") " pod="openshift-marketplace/redhat-operators-phppn" Dec 06 06:33:09 crc kubenswrapper[4706]: I1206 06:33:09.189303 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e630efa-89b1-4b6a-9737-4d6cd07981e8-utilities\") pod \"redhat-operators-phppn\" (UID: \"2e630efa-89b1-4b6a-9737-4d6cd07981e8\") " pod="openshift-marketplace/redhat-operators-phppn" Dec 06 06:33:09 crc kubenswrapper[4706]: I1206 06:33:09.290831 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fk4nm\" (UniqueName: \"kubernetes.io/projected/2e630efa-89b1-4b6a-9737-4d6cd07981e8-kube-api-access-fk4nm\") pod \"redhat-operators-phppn\" (UID: \"2e630efa-89b1-4b6a-9737-4d6cd07981e8\") " pod="openshift-marketplace/redhat-operators-phppn" Dec 06 06:33:09 crc kubenswrapper[4706]: I1206 
06:33:09.290931 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e630efa-89b1-4b6a-9737-4d6cd07981e8-utilities\") pod \"redhat-operators-phppn\" (UID: \"2e630efa-89b1-4b6a-9737-4d6cd07981e8\") " pod="openshift-marketplace/redhat-operators-phppn" Dec 06 06:33:09 crc kubenswrapper[4706]: I1206 06:33:09.290961 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e630efa-89b1-4b6a-9737-4d6cd07981e8-catalog-content\") pod \"redhat-operators-phppn\" (UID: \"2e630efa-89b1-4b6a-9737-4d6cd07981e8\") " pod="openshift-marketplace/redhat-operators-phppn" Dec 06 06:33:09 crc kubenswrapper[4706]: I1206 06:33:09.291470 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e630efa-89b1-4b6a-9737-4d6cd07981e8-utilities\") pod \"redhat-operators-phppn\" (UID: \"2e630efa-89b1-4b6a-9737-4d6cd07981e8\") " pod="openshift-marketplace/redhat-operators-phppn" Dec 06 06:33:09 crc kubenswrapper[4706]: I1206 06:33:09.291556 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e630efa-89b1-4b6a-9737-4d6cd07981e8-catalog-content\") pod \"redhat-operators-phppn\" (UID: \"2e630efa-89b1-4b6a-9737-4d6cd07981e8\") " pod="openshift-marketplace/redhat-operators-phppn" Dec 06 06:33:09 crc kubenswrapper[4706]: I1206 06:33:09.697403 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fk4nm\" (UniqueName: \"kubernetes.io/projected/2e630efa-89b1-4b6a-9737-4d6cd07981e8-kube-api-access-fk4nm\") pod \"redhat-operators-phppn\" (UID: \"2e630efa-89b1-4b6a-9737-4d6cd07981e8\") " pod="openshift-marketplace/redhat-operators-phppn" Dec 06 06:33:09 crc kubenswrapper[4706]: I1206 06:33:09.754039 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-phppn" Dec 06 06:33:10 crc kubenswrapper[4706]: I1206 06:33:10.254074 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-phppn"] Dec 06 06:33:11 crc kubenswrapper[4706]: I1206 06:33:11.000388 4706 generic.go:334] "Generic (PLEG): container finished" podID="2e630efa-89b1-4b6a-9737-4d6cd07981e8" containerID="c878bd47e59d1d432a62424b0ce94c23bb1fc9f5e756e300008c38506e231a72" exitCode=0 Dec 06 06:33:11 crc kubenswrapper[4706]: I1206 06:33:11.001023 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-phppn" event={"ID":"2e630efa-89b1-4b6a-9737-4d6cd07981e8","Type":"ContainerDied","Data":"c878bd47e59d1d432a62424b0ce94c23bb1fc9f5e756e300008c38506e231a72"} Dec 06 06:33:11 crc kubenswrapper[4706]: I1206 06:33:11.001164 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-phppn" event={"ID":"2e630efa-89b1-4b6a-9737-4d6cd07981e8","Type":"ContainerStarted","Data":"93457e86913220cf5a189d862af5c3d356a30fabef7668270be3af2b813de582"} Dec 06 06:33:11 crc kubenswrapper[4706]: I1206 06:33:11.004147 4706 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 06 06:33:12 crc kubenswrapper[4706]: I1206 06:33:12.010126 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-phppn" event={"ID":"2e630efa-89b1-4b6a-9737-4d6cd07981e8","Type":"ContainerStarted","Data":"99bd369f8aebdebb4ba6a8a781a367e06f9806a1dc3ad41af56d922325e3dc29"} Dec 06 06:33:13 crc kubenswrapper[4706]: I1206 06:33:13.021663 4706 generic.go:334] "Generic (PLEG): container finished" podID="2e630efa-89b1-4b6a-9737-4d6cd07981e8" containerID="99bd369f8aebdebb4ba6a8a781a367e06f9806a1dc3ad41af56d922325e3dc29" exitCode=0 Dec 06 06:33:13 crc kubenswrapper[4706]: I1206 06:33:13.021722 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-phppn" event={"ID":"2e630efa-89b1-4b6a-9737-4d6cd07981e8","Type":"ContainerDied","Data":"99bd369f8aebdebb4ba6a8a781a367e06f9806a1dc3ad41af56d922325e3dc29"} Dec 06 06:33:14 crc kubenswrapper[4706]: I1206 06:33:14.032313 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-phppn" event={"ID":"2e630efa-89b1-4b6a-9737-4d6cd07981e8","Type":"ContainerStarted","Data":"837617f40e2fced8407b75f10a9cafcbd0f7153a5c55600dfb72d42dfa149b4a"} Dec 06 06:33:14 crc kubenswrapper[4706]: I1206 06:33:14.063214 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-phppn" podStartSLOduration=2.586089342 podStartE2EDuration="5.063197662s" podCreationTimestamp="2025-12-06 06:33:09 +0000 UTC" firstStartedPulling="2025-12-06 06:33:11.003482955 +0000 UTC m=+4413.331306899" lastFinishedPulling="2025-12-06 06:33:13.480591275 +0000 UTC m=+4415.808415219" observedRunningTime="2025-12-06 06:33:14.057816986 +0000 UTC m=+4416.385640930" watchObservedRunningTime="2025-12-06 06:33:14.063197662 +0000 UTC m=+4416.391021596" Dec 06 06:33:19 crc kubenswrapper[4706]: I1206 06:33:19.754404 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-phppn" Dec 06 06:33:19 crc kubenswrapper[4706]: I1206 06:33:19.754966 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-phppn" Dec 06 06:33:20 crc 
kubenswrapper[4706]: I1206 06:33:20.034805 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-phppn" Dec 06 06:33:20 crc kubenswrapper[4706]: I1206 06:33:20.136246 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-phppn" Dec 06 06:33:20 crc kubenswrapper[4706]: I1206 06:33:20.276288 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-phppn"] Dec 06 06:33:22 crc kubenswrapper[4706]: I1206 06:33:22.102574 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-phppn" podUID="2e630efa-89b1-4b6a-9737-4d6cd07981e8" containerName="registry-server" containerID="cri-o://837617f40e2fced8407b75f10a9cafcbd0f7153a5c55600dfb72d42dfa149b4a" gracePeriod=2 Dec 06 06:33:22 crc kubenswrapper[4706]: I1206 06:33:22.571318 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-phppn" Dec 06 06:33:22 crc kubenswrapper[4706]: I1206 06:33:22.668369 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e630efa-89b1-4b6a-9737-4d6cd07981e8-catalog-content\") pod \"2e630efa-89b1-4b6a-9737-4d6cd07981e8\" (UID: \"2e630efa-89b1-4b6a-9737-4d6cd07981e8\") " Dec 06 06:33:22 crc kubenswrapper[4706]: I1206 06:33:22.668418 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fk4nm\" (UniqueName: \"kubernetes.io/projected/2e630efa-89b1-4b6a-9737-4d6cd07981e8-kube-api-access-fk4nm\") pod \"2e630efa-89b1-4b6a-9737-4d6cd07981e8\" (UID: \"2e630efa-89b1-4b6a-9737-4d6cd07981e8\") " Dec 06 06:33:22 crc kubenswrapper[4706]: I1206 06:33:22.668482 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e630efa-89b1-4b6a-9737-4d6cd07981e8-utilities\") pod \"2e630efa-89b1-4b6a-9737-4d6cd07981e8\" (UID: \"2e630efa-89b1-4b6a-9737-4d6cd07981e8\") " Dec 06 06:33:22 crc kubenswrapper[4706]: I1206 06:33:22.669609 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e630efa-89b1-4b6a-9737-4d6cd07981e8-utilities" (OuterVolumeSpecName: "utilities") pod "2e630efa-89b1-4b6a-9737-4d6cd07981e8" (UID: "2e630efa-89b1-4b6a-9737-4d6cd07981e8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:33:22 crc kubenswrapper[4706]: I1206 06:33:22.670522 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e630efa-89b1-4b6a-9737-4d6cd07981e8-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 06:33:22 crc kubenswrapper[4706]: I1206 06:33:22.684382 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e630efa-89b1-4b6a-9737-4d6cd07981e8-kube-api-access-fk4nm" (OuterVolumeSpecName: "kube-api-access-fk4nm") pod "2e630efa-89b1-4b6a-9737-4d6cd07981e8" (UID: "2e630efa-89b1-4b6a-9737-4d6cd07981e8"). InnerVolumeSpecName "kube-api-access-fk4nm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:33:22 crc kubenswrapper[4706]: I1206 06:33:22.772690 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fk4nm\" (UniqueName: \"kubernetes.io/projected/2e630efa-89b1-4b6a-9737-4d6cd07981e8-kube-api-access-fk4nm\") on node \"crc\" DevicePath \"\"" Dec 06 06:33:23 crc kubenswrapper[4706]: I1206 06:33:23.113005 4706 generic.go:334] "Generic (PLEG): container finished" podID="2e630efa-89b1-4b6a-9737-4d6cd07981e8" containerID="837617f40e2fced8407b75f10a9cafcbd0f7153a5c55600dfb72d42dfa149b4a" exitCode=0 Dec 06 06:33:23 crc kubenswrapper[4706]: I1206 06:33:23.113304 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-phppn" event={"ID":"2e630efa-89b1-4b6a-9737-4d6cd07981e8","Type":"ContainerDied","Data":"837617f40e2fced8407b75f10a9cafcbd0f7153a5c55600dfb72d42dfa149b4a"} Dec 06 06:33:23 crc kubenswrapper[4706]: I1206 06:33:23.113336 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-phppn" event={"ID":"2e630efa-89b1-4b6a-9737-4d6cd07981e8","Type":"ContainerDied","Data":"93457e86913220cf5a189d862af5c3d356a30fabef7668270be3af2b813de582"} Dec 06 06:33:23 crc kubenswrapper[4706]: I1206 06:33:23.113356 4706 scope.go:117] "RemoveContainer" containerID="837617f40e2fced8407b75f10a9cafcbd0f7153a5c55600dfb72d42dfa149b4a" Dec 06 06:33:23 crc kubenswrapper[4706]: I1206 06:33:23.113491 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-phppn" Dec 06 06:33:23 crc kubenswrapper[4706]: I1206 06:33:23.132443 4706 scope.go:117] "RemoveContainer" containerID="99bd369f8aebdebb4ba6a8a781a367e06f9806a1dc3ad41af56d922325e3dc29" Dec 06 06:33:23 crc kubenswrapper[4706]: I1206 06:33:23.150686 4706 scope.go:117] "RemoveContainer" containerID="c878bd47e59d1d432a62424b0ce94c23bb1fc9f5e756e300008c38506e231a72" Dec 06 06:33:23 crc kubenswrapper[4706]: I1206 06:33:23.206702 4706 scope.go:117] "RemoveContainer" containerID="837617f40e2fced8407b75f10a9cafcbd0f7153a5c55600dfb72d42dfa149b4a" Dec 06 06:33:23 crc kubenswrapper[4706]: E1206 06:33:23.207223 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"837617f40e2fced8407b75f10a9cafcbd0f7153a5c55600dfb72d42dfa149b4a\": container with ID starting with 837617f40e2fced8407b75f10a9cafcbd0f7153a5c55600dfb72d42dfa149b4a not found: ID does not exist" containerID="837617f40e2fced8407b75f10a9cafcbd0f7153a5c55600dfb72d42dfa149b4a" Dec 06 06:33:23 crc kubenswrapper[4706]: I1206 06:33:23.207273 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"837617f40e2fced8407b75f10a9cafcbd0f7153a5c55600dfb72d42dfa149b4a"} err="failed to get container status \"837617f40e2fced8407b75f10a9cafcbd0f7153a5c55600dfb72d42dfa149b4a\": rpc error: code = NotFound desc = could not find container \"837617f40e2fced8407b75f10a9cafcbd0f7153a5c55600dfb72d42dfa149b4a\": container with ID starting with 837617f40e2fced8407b75f10a9cafcbd0f7153a5c55600dfb72d42dfa149b4a not found: ID does not exist" Dec 06 06:33:23 crc kubenswrapper[4706]: I1206 06:33:23.207311 4706 scope.go:117] "RemoveContainer" containerID="99bd369f8aebdebb4ba6a8a781a367e06f9806a1dc3ad41af56d922325e3dc29" Dec 06 06:33:23 crc kubenswrapper[4706]: E1206 06:33:23.207641 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not 
find container \"99bd369f8aebdebb4ba6a8a781a367e06f9806a1dc3ad41af56d922325e3dc29\": container with ID starting with 99bd369f8aebdebb4ba6a8a781a367e06f9806a1dc3ad41af56d922325e3dc29 not found: ID does not exist" containerID="99bd369f8aebdebb4ba6a8a781a367e06f9806a1dc3ad41af56d922325e3dc29" Dec 06 06:33:23 crc kubenswrapper[4706]: I1206 06:33:23.207667 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99bd369f8aebdebb4ba6a8a781a367e06f9806a1dc3ad41af56d922325e3dc29"} err="failed to get container status \"99bd369f8aebdebb4ba6a8a781a367e06f9806a1dc3ad41af56d922325e3dc29\": rpc error: code = NotFound desc = could not find container \"99bd369f8aebdebb4ba6a8a781a367e06f9806a1dc3ad41af56d922325e3dc29\": container with ID starting with 99bd369f8aebdebb4ba6a8a781a367e06f9806a1dc3ad41af56d922325e3dc29 not found: ID does not exist" Dec 06 06:33:23 crc kubenswrapper[4706]: I1206 06:33:23.207686 4706 scope.go:117] "RemoveContainer" containerID="c878bd47e59d1d432a62424b0ce94c23bb1fc9f5e756e300008c38506e231a72" Dec 06 06:33:23 crc kubenswrapper[4706]: E1206 06:33:23.208604 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c878bd47e59d1d432a62424b0ce94c23bb1fc9f5e756e300008c38506e231a72\": container with ID starting with c878bd47e59d1d432a62424b0ce94c23bb1fc9f5e756e300008c38506e231a72 not found: ID does not exist" containerID="c878bd47e59d1d432a62424b0ce94c23bb1fc9f5e756e300008c38506e231a72" Dec 06 06:33:23 crc kubenswrapper[4706]: I1206 06:33:23.208646 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c878bd47e59d1d432a62424b0ce94c23bb1fc9f5e756e300008c38506e231a72"} err="failed to get container status \"c878bd47e59d1d432a62424b0ce94c23bb1fc9f5e756e300008c38506e231a72\": rpc error: code = NotFound desc = could not find container \"c878bd47e59d1d432a62424b0ce94c23bb1fc9f5e756e300008c38506e231a72\": container with ID starting with c878bd47e59d1d432a62424b0ce94c23bb1fc9f5e756e300008c38506e231a72 not found: ID does not exist" Dec 06 06:33:24 crc kubenswrapper[4706]: I1206 06:33:24.101550 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e630efa-89b1-4b6a-9737-4d6cd07981e8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2e630efa-89b1-4b6a-9737-4d6cd07981e8" (UID: "2e630efa-89b1-4b6a-9737-4d6cd07981e8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:33:24 crc kubenswrapper[4706]: I1206 06:33:24.195925 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e630efa-89b1-4b6a-9737-4d6cd07981e8-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 06:33:24 crc kubenswrapper[4706]: I1206 06:33:24.349082 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-phppn"] Dec 06 06:33:24 crc kubenswrapper[4706]: I1206 06:33:24.372201 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-phppn"] Dec 06 06:33:24 crc kubenswrapper[4706]: E1206 06:33:24.540926 4706 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2e630efa_89b1_4b6a_9737_4d6cd07981e8.slice/crio-93457e86913220cf5a189d862af5c3d356a30fabef7668270be3af2b813de582\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2e630efa_89b1_4b6a_9737_4d6cd07981e8.slice\": RecentStats: unable to find data in memory cache]" Dec 06 06:33:26 crc kubenswrapper[4706]: I1206 06:33:26.045960 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e630efa-89b1-4b6a-9737-4d6cd07981e8" path="/var/lib/kubelet/pods/2e630efa-89b1-4b6a-9737-4d6cd07981e8/volumes" Dec 06 06:33:33 crc kubenswrapper[4706]: I1206 06:33:33.247980 4706 generic.go:334] "Generic (PLEG): container finished" podID="ad589568-2d22-488b-a9c0-bb9b091f28a6" containerID="183dfc9f4435fea54de44ee16a888c04f667702e690126e2da7716bae0ac1102" exitCode=0 Dec 06 06:33:33 crc kubenswrapper[4706]: I1206 06:33:33.248138 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-pbc2v/must-gather-8nq8m" event={"ID":"ad589568-2d22-488b-a9c0-bb9b091f28a6","Type":"ContainerDied","Data":"183dfc9f4435fea54de44ee16a888c04f667702e690126e2da7716bae0ac1102"} Dec 06 06:33:33 crc kubenswrapper[4706]: I1206 06:33:33.249329 4706 scope.go:117] "RemoveContainer" containerID="183dfc9f4435fea54de44ee16a888c04f667702e690126e2da7716bae0ac1102" Dec 06 06:33:33 crc kubenswrapper[4706]: I1206 06:33:33.308219 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-pbc2v_must-gather-8nq8m_ad589568-2d22-488b-a9c0-bb9b091f28a6/gather/0.log" Dec 06 06:33:41 crc kubenswrapper[4706]: I1206 06:33:41.640549 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-pbc2v/must-gather-8nq8m"] Dec 06 06:33:41 crc kubenswrapper[4706]: I1206 06:33:41.641610 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-pbc2v/must-gather-8nq8m" podUID="ad589568-2d22-488b-a9c0-bb9b091f28a6" containerName="copy" containerID="cri-o://f1e76b36fd35275dc35d8aa1f1aa573ce2ef31dac4594e1a3ee2d7ddc71ca7f4" gracePeriod=2 Dec 06 06:33:41 crc kubenswrapper[4706]: I1206 06:33:41.647627 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-pbc2v/must-gather-8nq8m"] Dec 06 06:33:42 crc kubenswrapper[4706]: I1206 06:33:42.100255 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-pbc2v_must-gather-8nq8m_ad589568-2d22-488b-a9c0-bb9b091f28a6/copy/0.log" Dec 06 06:33:42 crc kubenswrapper[4706]: I1206 06:33:42.101669 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-pbc2v/must-gather-8nq8m" Dec 06 06:33:42 crc kubenswrapper[4706]: I1206 06:33:42.180847 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ad589568-2d22-488b-a9c0-bb9b091f28a6-must-gather-output\") pod \"ad589568-2d22-488b-a9c0-bb9b091f28a6\" (UID: \"ad589568-2d22-488b-a9c0-bb9b091f28a6\") " Dec 06 06:33:42 crc kubenswrapper[4706]: I1206 06:33:42.181315 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxhjl\" (UniqueName: \"kubernetes.io/projected/ad589568-2d22-488b-a9c0-bb9b091f28a6-kube-api-access-rxhjl\") pod \"ad589568-2d22-488b-a9c0-bb9b091f28a6\" (UID: \"ad589568-2d22-488b-a9c0-bb9b091f28a6\") " Dec 06 06:33:42 crc kubenswrapper[4706]: I1206 06:33:42.190793 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad589568-2d22-488b-a9c0-bb9b091f28a6-kube-api-access-rxhjl" (OuterVolumeSpecName: "kube-api-access-rxhjl") pod "ad589568-2d22-488b-a9c0-bb9b091f28a6" (UID: "ad589568-2d22-488b-a9c0-bb9b091f28a6"). InnerVolumeSpecName "kube-api-access-rxhjl". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:33:42 crc kubenswrapper[4706]: I1206 06:33:42.288566 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxhjl\" (UniqueName: \"kubernetes.io/projected/ad589568-2d22-488b-a9c0-bb9b091f28a6-kube-api-access-rxhjl\") on node \"crc\" DevicePath \"\"" Dec 06 06:33:42 crc kubenswrapper[4706]: I1206 06:33:42.325635 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-pbc2v_must-gather-8nq8m_ad589568-2d22-488b-a9c0-bb9b091f28a6/copy/0.log" Dec 06 06:33:42 crc kubenswrapper[4706]: I1206 06:33:42.325966 4706 generic.go:334] "Generic (PLEG): container finished" podID="ad589568-2d22-488b-a9c0-bb9b091f28a6" containerID="f1e76b36fd35275dc35d8aa1f1aa573ce2ef31dac4594e1a3ee2d7ddc71ca7f4" exitCode=143 Dec 06 06:33:42 crc kubenswrapper[4706]: I1206 06:33:42.326033 4706 scope.go:117] "RemoveContainer" containerID="f1e76b36fd35275dc35d8aa1f1aa573ce2ef31dac4594e1a3ee2d7ddc71ca7f4" Dec 06 06:33:42 crc kubenswrapper[4706]: I1206 06:33:42.326175 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-pbc2v/must-gather-8nq8m" Dec 06 06:33:42 crc kubenswrapper[4706]: I1206 06:33:42.341125 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ad589568-2d22-488b-a9c0-bb9b091f28a6-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "ad589568-2d22-488b-a9c0-bb9b091f28a6" (UID: "ad589568-2d22-488b-a9c0-bb9b091f28a6"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:33:42 crc kubenswrapper[4706]: I1206 06:33:42.343543 4706 scope.go:117] "RemoveContainer" containerID="183dfc9f4435fea54de44ee16a888c04f667702e690126e2da7716bae0ac1102" Dec 06 06:33:42 crc kubenswrapper[4706]: I1206 06:33:42.385123 4706 scope.go:117] "RemoveContainer" containerID="f1e76b36fd35275dc35d8aa1f1aa573ce2ef31dac4594e1a3ee2d7ddc71ca7f4" Dec 06 06:33:42 crc kubenswrapper[4706]: E1206 06:33:42.385632 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1e76b36fd35275dc35d8aa1f1aa573ce2ef31dac4594e1a3ee2d7ddc71ca7f4\": container with ID starting with f1e76b36fd35275dc35d8aa1f1aa573ce2ef31dac4594e1a3ee2d7ddc71ca7f4 not found: ID does not exist" containerID="f1e76b36fd35275dc35d8aa1f1aa573ce2ef31dac4594e1a3ee2d7ddc71ca7f4" Dec 06 06:33:42 crc kubenswrapper[4706]: I1206 06:33:42.385678 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1e76b36fd35275dc35d8aa1f1aa573ce2ef31dac4594e1a3ee2d7ddc71ca7f4"} err="failed to get container status \"f1e76b36fd35275dc35d8aa1f1aa573ce2ef31dac4594e1a3ee2d7ddc71ca7f4\": rpc error: code = NotFound desc = could not find container \"f1e76b36fd35275dc35d8aa1f1aa573ce2ef31dac4594e1a3ee2d7ddc71ca7f4\": container with ID starting with f1e76b36fd35275dc35d8aa1f1aa573ce2ef31dac4594e1a3ee2d7ddc71ca7f4 not found: ID does not exist" Dec 06 06:33:42 crc kubenswrapper[4706]: I1206 06:33:42.385705 4706 scope.go:117] "RemoveContainer" containerID="183dfc9f4435fea54de44ee16a888c04f667702e690126e2da7716bae0ac1102" Dec 06 06:33:42 crc kubenswrapper[4706]: E1206 06:33:42.386120 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"183dfc9f4435fea54de44ee16a888c04f667702e690126e2da7716bae0ac1102\": container with ID starting with 183dfc9f4435fea54de44ee16a888c04f667702e690126e2da7716bae0ac1102 not found: ID does not exist" containerID="183dfc9f4435fea54de44ee16a888c04f667702e690126e2da7716bae0ac1102" Dec 06 06:33:42 crc kubenswrapper[4706]: I1206 06:33:42.386163 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"183dfc9f4435fea54de44ee16a888c04f667702e690126e2da7716bae0ac1102"} err="failed to get container status \"183dfc9f4435fea54de44ee16a888c04f667702e690126e2da7716bae0ac1102\": rpc error: code = NotFound desc = could not find container \"183dfc9f4435fea54de44ee16a888c04f667702e690126e2da7716bae0ac1102\": container with ID starting with 183dfc9f4435fea54de44ee16a888c04f667702e690126e2da7716bae0ac1102 not found: ID does not exist" Dec 06 06:33:42 crc kubenswrapper[4706]: I1206 06:33:42.390224 4706 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ad589568-2d22-488b-a9c0-bb9b091f28a6-must-gather-output\") on node \"crc\" DevicePath \"\"" Dec 06 06:33:44 crc kubenswrapper[4706]: I1206 06:33:44.047234 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad589568-2d22-488b-a9c0-bb9b091f28a6" path="/var/lib/kubelet/pods/ad589568-2d22-488b-a9c0-bb9b091f28a6/volumes" Dec 06 06:35:05 crc kubenswrapper[4706]: I1206 06:35:05.961547 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" start-of-body= Dec 06 06:35:05 crc kubenswrapper[4706]: I1206 06:35:05.962293 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 06:35:12 crc kubenswrapper[4706]: I1206 06:35:12.875967 4706 scope.go:117] "RemoveContainer" containerID="8393625bea98b924f1b88e544162ba373f7c3ce3addcdbf2bead211eba6d84af" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.240461 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-hfl8x"] Dec 06 06:35:24 crc kubenswrapper[4706]: E1206 06:35:24.241539 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e630efa-89b1-4b6a-9737-4d6cd07981e8" containerName="registry-server" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.241555 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e630efa-89b1-4b6a-9737-4d6cd07981e8" containerName="registry-server" Dec 06 06:35:24 crc kubenswrapper[4706]: E1206 06:35:24.241587 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e630efa-89b1-4b6a-9737-4d6cd07981e8" containerName="extract-utilities" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.241595 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e630efa-89b1-4b6a-9737-4d6cd07981e8" containerName="extract-utilities" Dec 06 06:35:24 crc kubenswrapper[4706]: E1206 06:35:24.241608 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad589568-2d22-488b-a9c0-bb9b091f28a6" containerName="copy" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.241617 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad589568-2d22-488b-a9c0-bb9b091f28a6" containerName="copy" Dec 06 06:35:24 crc kubenswrapper[4706]: E1206 06:35:24.241635 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e630efa-89b1-4b6a-9737-4d6cd07981e8" containerName="extract-content" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.241643 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e630efa-89b1-4b6a-9737-4d6cd07981e8" containerName="extract-content" Dec 06 06:35:24 crc kubenswrapper[4706]: E1206 06:35:24.241666 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad589568-2d22-488b-a9c0-bb9b091f28a6" containerName="gather" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.241675 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad589568-2d22-488b-a9c0-bb9b091f28a6" containerName="gather" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.241879 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad589568-2d22-488b-a9c0-bb9b091f28a6" containerName="gather" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.241898 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad589568-2d22-488b-a9c0-bb9b091f28a6" containerName="copy" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.241918 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e630efa-89b1-4b6a-9737-4d6cd07981e8" containerName="registry-server" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.243514 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hfl8x" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.262038 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hfl8x"] Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.379106 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a37f0812-fcd0-4319-a12a-40ac2db43097-utilities\") pod \"certified-operators-hfl8x\" (UID: \"a37f0812-fcd0-4319-a12a-40ac2db43097\") " pod="openshift-marketplace/certified-operators-hfl8x" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.379436 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a37f0812-fcd0-4319-a12a-40ac2db43097-catalog-content\") pod \"certified-operators-hfl8x\" (UID: \"a37f0812-fcd0-4319-a12a-40ac2db43097\") " pod="openshift-marketplace/certified-operators-hfl8x" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.379530 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dfw4s\" (UniqueName: \"kubernetes.io/projected/a37f0812-fcd0-4319-a12a-40ac2db43097-kube-api-access-dfw4s\") pod \"certified-operators-hfl8x\" (UID: \"a37f0812-fcd0-4319-a12a-40ac2db43097\") " pod="openshift-marketplace/certified-operators-hfl8x" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.481749 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a37f0812-fcd0-4319-a12a-40ac2db43097-catalog-content\") pod \"certified-operators-hfl8x\" (UID: \"a37f0812-fcd0-4319-a12a-40ac2db43097\") " pod="openshift-marketplace/certified-operators-hfl8x" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.482032 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dfw4s\" (UniqueName: \"kubernetes.io/projected/a37f0812-fcd0-4319-a12a-40ac2db43097-kube-api-access-dfw4s\") pod \"certified-operators-hfl8x\" (UID: \"a37f0812-fcd0-4319-a12a-40ac2db43097\") " pod="openshift-marketplace/certified-operators-hfl8x" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.482272 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a37f0812-fcd0-4319-a12a-40ac2db43097-utilities\") pod \"certified-operators-hfl8x\" (UID: \"a37f0812-fcd0-4319-a12a-40ac2db43097\") " pod="openshift-marketplace/certified-operators-hfl8x" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.482309 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a37f0812-fcd0-4319-a12a-40ac2db43097-catalog-content\") pod \"certified-operators-hfl8x\" (UID: \"a37f0812-fcd0-4319-a12a-40ac2db43097\") " pod="openshift-marketplace/certified-operators-hfl8x" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.482527 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a37f0812-fcd0-4319-a12a-40ac2db43097-utilities\") pod \"certified-operators-hfl8x\" (UID: \"a37f0812-fcd0-4319-a12a-40ac2db43097\") " pod="openshift-marketplace/certified-operators-hfl8x" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.502450 4706 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-dfw4s\" (UniqueName: \"kubernetes.io/projected/a37f0812-fcd0-4319-a12a-40ac2db43097-kube-api-access-dfw4s\") pod \"certified-operators-hfl8x\" (UID: \"a37f0812-fcd0-4319-a12a-40ac2db43097\") " pod="openshift-marketplace/certified-operators-hfl8x" Dec 06 06:35:24 crc kubenswrapper[4706]: I1206 06:35:24.587596 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hfl8x" Dec 06 06:35:25 crc kubenswrapper[4706]: I1206 06:35:25.102352 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-hfl8x"] Dec 06 06:35:25 crc kubenswrapper[4706]: I1206 06:35:25.612094 4706 generic.go:334] "Generic (PLEG): container finished" podID="a37f0812-fcd0-4319-a12a-40ac2db43097" containerID="88e099701210bbe5ee5b58e1c8ebf47c576581d4d887180fd05875ca789985be" exitCode=0 Dec 06 06:35:25 crc kubenswrapper[4706]: I1206 06:35:25.612151 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfl8x" event={"ID":"a37f0812-fcd0-4319-a12a-40ac2db43097","Type":"ContainerDied","Data":"88e099701210bbe5ee5b58e1c8ebf47c576581d4d887180fd05875ca789985be"} Dec 06 06:35:25 crc kubenswrapper[4706]: I1206 06:35:25.612177 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfl8x" event={"ID":"a37f0812-fcd0-4319-a12a-40ac2db43097","Type":"ContainerStarted","Data":"05f3c8c145e8cc37ffa58f304645b8607c78077947ee0b80b4c89049d8bf3d0a"} Dec 06 06:35:26 crc kubenswrapper[4706]: I1206 06:35:26.622865 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfl8x" event={"ID":"a37f0812-fcd0-4319-a12a-40ac2db43097","Type":"ContainerStarted","Data":"0f8c0be6940fe076ae752f7ad7c6f1b36935d3bfa84abba8d7fff2b69544e605"} Dec 06 06:35:27 crc kubenswrapper[4706]: I1206 06:35:27.633723 4706 generic.go:334] "Generic (PLEG): container finished" podID="a37f0812-fcd0-4319-a12a-40ac2db43097" containerID="0f8c0be6940fe076ae752f7ad7c6f1b36935d3bfa84abba8d7fff2b69544e605" exitCode=0 Dec 06 06:35:27 crc kubenswrapper[4706]: I1206 06:35:27.633776 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfl8x" event={"ID":"a37f0812-fcd0-4319-a12a-40ac2db43097","Type":"ContainerDied","Data":"0f8c0be6940fe076ae752f7ad7c6f1b36935d3bfa84abba8d7fff2b69544e605"} Dec 06 06:35:28 crc kubenswrapper[4706]: I1206 06:35:28.646581 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfl8x" event={"ID":"a37f0812-fcd0-4319-a12a-40ac2db43097","Type":"ContainerStarted","Data":"51055b3a26226d87b0f2215f13014928721f7382dd572043d519c82aa79cad70"} Dec 06 06:35:28 crc kubenswrapper[4706]: I1206 06:35:28.676825 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-hfl8x" podStartSLOduration=2.143788128 podStartE2EDuration="4.676808544s" podCreationTimestamp="2025-12-06 06:35:24 +0000 UTC" firstStartedPulling="2025-12-06 06:35:25.614387394 +0000 UTC m=+4547.942211338" lastFinishedPulling="2025-12-06 06:35:28.14740781 +0000 UTC m=+4550.475231754" observedRunningTime="2025-12-06 06:35:28.669738563 +0000 UTC m=+4550.997562507" watchObservedRunningTime="2025-12-06 06:35:28.676808544 +0000 UTC m=+4551.004632488" Dec 06 06:35:34 crc kubenswrapper[4706]: I1206 06:35:34.588643 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-hfl8x" Dec 06 06:35:34 crc kubenswrapper[4706]: I1206 06:35:34.589256 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-hfl8x" Dec 06 06:35:34 crc kubenswrapper[4706]: I1206 06:35:34.634839 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-hfl8x" Dec 06 06:35:34 crc kubenswrapper[4706]: I1206 06:35:34.749879 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-hfl8x" Dec 06 06:35:34 crc kubenswrapper[4706]: I1206 06:35:34.867687 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hfl8x"] Dec 06 06:35:35 crc kubenswrapper[4706]: I1206 06:35:35.961874 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 06:35:35 crc kubenswrapper[4706]: I1206 06:35:35.961939 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 06:35:36 crc kubenswrapper[4706]: I1206 06:35:36.713847 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-hfl8x" podUID="a37f0812-fcd0-4319-a12a-40ac2db43097" containerName="registry-server" containerID="cri-o://51055b3a26226d87b0f2215f13014928721f7382dd572043d519c82aa79cad70" gracePeriod=2 Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.179526 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-hfl8x" Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.236723 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dfw4s\" (UniqueName: \"kubernetes.io/projected/a37f0812-fcd0-4319-a12a-40ac2db43097-kube-api-access-dfw4s\") pod \"a37f0812-fcd0-4319-a12a-40ac2db43097\" (UID: \"a37f0812-fcd0-4319-a12a-40ac2db43097\") " Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.237415 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a37f0812-fcd0-4319-a12a-40ac2db43097-catalog-content\") pod \"a37f0812-fcd0-4319-a12a-40ac2db43097\" (UID: \"a37f0812-fcd0-4319-a12a-40ac2db43097\") " Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.237825 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a37f0812-fcd0-4319-a12a-40ac2db43097-utilities\") pod \"a37f0812-fcd0-4319-a12a-40ac2db43097\" (UID: \"a37f0812-fcd0-4319-a12a-40ac2db43097\") " Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.239143 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a37f0812-fcd0-4319-a12a-40ac2db43097-utilities" (OuterVolumeSpecName: "utilities") pod "a37f0812-fcd0-4319-a12a-40ac2db43097" (UID: "a37f0812-fcd0-4319-a12a-40ac2db43097"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.242281 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a37f0812-fcd0-4319-a12a-40ac2db43097-kube-api-access-dfw4s" (OuterVolumeSpecName: "kube-api-access-dfw4s") pod "a37f0812-fcd0-4319-a12a-40ac2db43097" (UID: "a37f0812-fcd0-4319-a12a-40ac2db43097"). InnerVolumeSpecName "kube-api-access-dfw4s". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.304371 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a37f0812-fcd0-4319-a12a-40ac2db43097-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a37f0812-fcd0-4319-a12a-40ac2db43097" (UID: "a37f0812-fcd0-4319-a12a-40ac2db43097"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.342877 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dfw4s\" (UniqueName: \"kubernetes.io/projected/a37f0812-fcd0-4319-a12a-40ac2db43097-kube-api-access-dfw4s\") on node \"crc\" DevicePath \"\"" Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.342914 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a37f0812-fcd0-4319-a12a-40ac2db43097-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.342925 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a37f0812-fcd0-4319-a12a-40ac2db43097-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.726194 4706 generic.go:334] "Generic (PLEG): container finished" podID="a37f0812-fcd0-4319-a12a-40ac2db43097" containerID="51055b3a26226d87b0f2215f13014928721f7382dd572043d519c82aa79cad70" exitCode=0 Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.726234 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfl8x" event={"ID":"a37f0812-fcd0-4319-a12a-40ac2db43097","Type":"ContainerDied","Data":"51055b3a26226d87b0f2215f13014928721f7382dd572043d519c82aa79cad70"} Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.726259 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-hfl8x" event={"ID":"a37f0812-fcd0-4319-a12a-40ac2db43097","Type":"ContainerDied","Data":"05f3c8c145e8cc37ffa58f304645b8607c78077947ee0b80b4c89049d8bf3d0a"} Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.726276 4706 scope.go:117] "RemoveContainer" containerID="51055b3a26226d87b0f2215f13014928721f7382dd572043d519c82aa79cad70" Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.726379 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-hfl8x" Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.765986 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-hfl8x"] Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.767796 4706 scope.go:117] "RemoveContainer" containerID="0f8c0be6940fe076ae752f7ad7c6f1b36935d3bfa84abba8d7fff2b69544e605" Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.779278 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-hfl8x"] Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.793523 4706 scope.go:117] "RemoveContainer" containerID="88e099701210bbe5ee5b58e1c8ebf47c576581d4d887180fd05875ca789985be" Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.843801 4706 scope.go:117] "RemoveContainer" containerID="51055b3a26226d87b0f2215f13014928721f7382dd572043d519c82aa79cad70" Dec 06 06:35:37 crc kubenswrapper[4706]: E1206 06:35:37.844299 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51055b3a26226d87b0f2215f13014928721f7382dd572043d519c82aa79cad70\": container with ID starting with 51055b3a26226d87b0f2215f13014928721f7382dd572043d519c82aa79cad70 not found: ID does not exist" containerID="51055b3a26226d87b0f2215f13014928721f7382dd572043d519c82aa79cad70" Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.844533 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51055b3a26226d87b0f2215f13014928721f7382dd572043d519c82aa79cad70"} err="failed to get container status \"51055b3a26226d87b0f2215f13014928721f7382dd572043d519c82aa79cad70\": rpc error: code = NotFound desc = could not find container \"51055b3a26226d87b0f2215f13014928721f7382dd572043d519c82aa79cad70\": container with ID starting with 51055b3a26226d87b0f2215f13014928721f7382dd572043d519c82aa79cad70 not found: ID does not exist" Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.844568 4706 scope.go:117] "RemoveContainer" containerID="0f8c0be6940fe076ae752f7ad7c6f1b36935d3bfa84abba8d7fff2b69544e605" Dec 06 06:35:37 crc kubenswrapper[4706]: E1206 06:35:37.844944 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f8c0be6940fe076ae752f7ad7c6f1b36935d3bfa84abba8d7fff2b69544e605\": container with ID starting with 0f8c0be6940fe076ae752f7ad7c6f1b36935d3bfa84abba8d7fff2b69544e605 not found: ID does not exist" containerID="0f8c0be6940fe076ae752f7ad7c6f1b36935d3bfa84abba8d7fff2b69544e605" Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.844978 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f8c0be6940fe076ae752f7ad7c6f1b36935d3bfa84abba8d7fff2b69544e605"} err="failed to get container status \"0f8c0be6940fe076ae752f7ad7c6f1b36935d3bfa84abba8d7fff2b69544e605\": rpc error: code = NotFound desc = could not find container \"0f8c0be6940fe076ae752f7ad7c6f1b36935d3bfa84abba8d7fff2b69544e605\": container with ID starting with 0f8c0be6940fe076ae752f7ad7c6f1b36935d3bfa84abba8d7fff2b69544e605 not found: ID does not exist" Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.844997 4706 scope.go:117] "RemoveContainer" containerID="88e099701210bbe5ee5b58e1c8ebf47c576581d4d887180fd05875ca789985be" Dec 06 06:35:37 crc kubenswrapper[4706]: E1206 06:35:37.845290 4706 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"88e099701210bbe5ee5b58e1c8ebf47c576581d4d887180fd05875ca789985be\": container with ID starting with 88e099701210bbe5ee5b58e1c8ebf47c576581d4d887180fd05875ca789985be not found: ID does not exist" containerID="88e099701210bbe5ee5b58e1c8ebf47c576581d4d887180fd05875ca789985be" Dec 06 06:35:37 crc kubenswrapper[4706]: I1206 06:35:37.845324 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"88e099701210bbe5ee5b58e1c8ebf47c576581d4d887180fd05875ca789985be"} err="failed to get container status \"88e099701210bbe5ee5b58e1c8ebf47c576581d4d887180fd05875ca789985be\": rpc error: code = NotFound desc = could not find container \"88e099701210bbe5ee5b58e1c8ebf47c576581d4d887180fd05875ca789985be\": container with ID starting with 88e099701210bbe5ee5b58e1c8ebf47c576581d4d887180fd05875ca789985be not found: ID does not exist" Dec 06 06:35:38 crc kubenswrapper[4706]: I1206 06:35:38.050946 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a37f0812-fcd0-4319-a12a-40ac2db43097" path="/var/lib/kubelet/pods/a37f0812-fcd0-4319-a12a-40ac2db43097/volumes" Dec 06 06:36:05 crc kubenswrapper[4706]: I1206 06:36:05.961414 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 06:36:05 crc kubenswrapper[4706]: I1206 06:36:05.961955 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 06:36:05 crc kubenswrapper[4706]: I1206 06:36:05.962001 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 06:36:05 crc kubenswrapper[4706]: I1206 06:36:05.962811 4706 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff"} pod="openshift-machine-config-operator/machine-config-daemon-z27rn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 06 06:36:05 crc kubenswrapper[4706]: I1206 06:36:05.962874 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" containerID="cri-o://18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" gracePeriod=600 Dec 06 06:36:06 crc kubenswrapper[4706]: E1206 06:36:06.084575 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:36:07 crc kubenswrapper[4706]: I1206 06:36:07.000566 4706 generic.go:334] 
"Generic (PLEG): container finished" podID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" exitCode=0 Dec 06 06:36:07 crc kubenswrapper[4706]: I1206 06:36:07.000641 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerDied","Data":"18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff"} Dec 06 06:36:07 crc kubenswrapper[4706]: I1206 06:36:07.001255 4706 scope.go:117] "RemoveContainer" containerID="0712b5d6dd3cf50d5e8260c2d90bbd3bd3e7c8f2f055b870399a5e8978ffe708" Dec 06 06:36:07 crc kubenswrapper[4706]: I1206 06:36:07.001916 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:36:07 crc kubenswrapper[4706]: E1206 06:36:07.002290 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:36:18 crc kubenswrapper[4706]: I1206 06:36:18.047696 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:36:18 crc kubenswrapper[4706]: E1206 06:36:18.056010 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:36:31 crc kubenswrapper[4706]: I1206 06:36:31.036269 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:36:31 crc kubenswrapper[4706]: E1206 06:36:31.037074 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:36:34 crc kubenswrapper[4706]: I1206 06:36:34.535396 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-t68mk/must-gather-gxctj"] Dec 06 06:36:34 crc kubenswrapper[4706]: E1206 06:36:34.536755 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a37f0812-fcd0-4319-a12a-40ac2db43097" containerName="registry-server" Dec 06 06:36:34 crc kubenswrapper[4706]: I1206 06:36:34.536777 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a37f0812-fcd0-4319-a12a-40ac2db43097" containerName="registry-server" Dec 06 06:36:34 crc kubenswrapper[4706]: E1206 06:36:34.536797 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a37f0812-fcd0-4319-a12a-40ac2db43097" containerName="extract-content" Dec 06 06:36:34 crc kubenswrapper[4706]: I1206 06:36:34.536808 4706 
state_mem.go:107] "Deleted CPUSet assignment" podUID="a37f0812-fcd0-4319-a12a-40ac2db43097" containerName="extract-content" Dec 06 06:36:34 crc kubenswrapper[4706]: E1206 06:36:34.536820 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a37f0812-fcd0-4319-a12a-40ac2db43097" containerName="extract-utilities" Dec 06 06:36:34 crc kubenswrapper[4706]: I1206 06:36:34.536829 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="a37f0812-fcd0-4319-a12a-40ac2db43097" containerName="extract-utilities" Dec 06 06:36:34 crc kubenswrapper[4706]: I1206 06:36:34.537034 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="a37f0812-fcd0-4319-a12a-40ac2db43097" containerName="registry-server" Dec 06 06:36:34 crc kubenswrapper[4706]: I1206 06:36:34.538475 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t68mk/must-gather-gxctj" Dec 06 06:36:34 crc kubenswrapper[4706]: I1206 06:36:34.546750 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-t68mk"/"openshift-service-ca.crt" Dec 06 06:36:34 crc kubenswrapper[4706]: I1206 06:36:34.550495 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-t68mk"/"kube-root-ca.crt" Dec 06 06:36:34 crc kubenswrapper[4706]: I1206 06:36:34.550703 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-t68mk"/"default-dockercfg-gj7mg" Dec 06 06:36:34 crc kubenswrapper[4706]: I1206 06:36:34.551814 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-t68mk/must-gather-gxctj"] Dec 06 06:36:34 crc kubenswrapper[4706]: I1206 06:36:34.608301 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dj4wm\" (UniqueName: \"kubernetes.io/projected/b43aae1d-2a9a-44de-a8cf-c18c9660199a-kube-api-access-dj4wm\") pod \"must-gather-gxctj\" (UID: \"b43aae1d-2a9a-44de-a8cf-c18c9660199a\") " pod="openshift-must-gather-t68mk/must-gather-gxctj" Dec 06 06:36:34 crc kubenswrapper[4706]: I1206 06:36:34.608432 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b43aae1d-2a9a-44de-a8cf-c18c9660199a-must-gather-output\") pod \"must-gather-gxctj\" (UID: \"b43aae1d-2a9a-44de-a8cf-c18c9660199a\") " pod="openshift-must-gather-t68mk/must-gather-gxctj" Dec 06 06:36:34 crc kubenswrapper[4706]: I1206 06:36:34.709722 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b43aae1d-2a9a-44de-a8cf-c18c9660199a-must-gather-output\") pod \"must-gather-gxctj\" (UID: \"b43aae1d-2a9a-44de-a8cf-c18c9660199a\") " pod="openshift-must-gather-t68mk/must-gather-gxctj" Dec 06 06:36:34 crc kubenswrapper[4706]: I1206 06:36:34.709880 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dj4wm\" (UniqueName: \"kubernetes.io/projected/b43aae1d-2a9a-44de-a8cf-c18c9660199a-kube-api-access-dj4wm\") pod \"must-gather-gxctj\" (UID: \"b43aae1d-2a9a-44de-a8cf-c18c9660199a\") " pod="openshift-must-gather-t68mk/must-gather-gxctj" Dec 06 06:36:34 crc kubenswrapper[4706]: I1206 06:36:34.710749 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b43aae1d-2a9a-44de-a8cf-c18c9660199a-must-gather-output\") pod \"must-gather-gxctj\" 
(UID: \"b43aae1d-2a9a-44de-a8cf-c18c9660199a\") " pod="openshift-must-gather-t68mk/must-gather-gxctj" Dec 06 06:36:34 crc kubenswrapper[4706]: I1206 06:36:34.735222 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dj4wm\" (UniqueName: \"kubernetes.io/projected/b43aae1d-2a9a-44de-a8cf-c18c9660199a-kube-api-access-dj4wm\") pod \"must-gather-gxctj\" (UID: \"b43aae1d-2a9a-44de-a8cf-c18c9660199a\") " pod="openshift-must-gather-t68mk/must-gather-gxctj" Dec 06 06:36:34 crc kubenswrapper[4706]: I1206 06:36:34.872488 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t68mk/must-gather-gxctj" Dec 06 06:36:35 crc kubenswrapper[4706]: I1206 06:36:35.345282 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-t68mk/must-gather-gxctj"] Dec 06 06:36:36 crc kubenswrapper[4706]: I1206 06:36:36.294884 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t68mk/must-gather-gxctj" event={"ID":"b43aae1d-2a9a-44de-a8cf-c18c9660199a","Type":"ContainerStarted","Data":"96573c9c398a2c4facac229c941bd87216f4808273c7853d13db00c40d346e1d"} Dec 06 06:36:36 crc kubenswrapper[4706]: I1206 06:36:36.295501 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t68mk/must-gather-gxctj" event={"ID":"b43aae1d-2a9a-44de-a8cf-c18c9660199a","Type":"ContainerStarted","Data":"c9b268c81c0b2a440404ae7fbd529a91cb43cd4b27b73f39149e6b798f5b022d"} Dec 06 06:36:36 crc kubenswrapper[4706]: I1206 06:36:36.295516 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t68mk/must-gather-gxctj" event={"ID":"b43aae1d-2a9a-44de-a8cf-c18c9660199a","Type":"ContainerStarted","Data":"cf1277a261fabcfb0eec526638d33d775f8e6b3cb36e85da51e823c4753fefe3"} Dec 06 06:36:36 crc kubenswrapper[4706]: I1206 06:36:36.331872 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-t68mk/must-gather-gxctj" podStartSLOduration=2.331851077 podStartE2EDuration="2.331851077s" podCreationTimestamp="2025-12-06 06:36:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 06:36:36.321431544 +0000 UTC m=+4618.649255498" watchObservedRunningTime="2025-12-06 06:36:36.331851077 +0000 UTC m=+4618.659675041" Dec 06 06:36:39 crc kubenswrapper[4706]: I1206 06:36:39.249995 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-t68mk/crc-debug-xfnmh"] Dec 06 06:36:39 crc kubenswrapper[4706]: I1206 06:36:39.252985 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-t68mk/crc-debug-xfnmh" Dec 06 06:36:39 crc kubenswrapper[4706]: I1206 06:36:39.307473 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/05ff8b3c-ff37-4e62-9f0a-06a885a824b9-host\") pod \"crc-debug-xfnmh\" (UID: \"05ff8b3c-ff37-4e62-9f0a-06a885a824b9\") " pod="openshift-must-gather-t68mk/crc-debug-xfnmh" Dec 06 06:36:39 crc kubenswrapper[4706]: I1206 06:36:39.307894 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcz2x\" (UniqueName: \"kubernetes.io/projected/05ff8b3c-ff37-4e62-9f0a-06a885a824b9-kube-api-access-fcz2x\") pod \"crc-debug-xfnmh\" (UID: \"05ff8b3c-ff37-4e62-9f0a-06a885a824b9\") " pod="openshift-must-gather-t68mk/crc-debug-xfnmh" Dec 06 06:36:39 crc kubenswrapper[4706]: I1206 06:36:39.410186 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/05ff8b3c-ff37-4e62-9f0a-06a885a824b9-host\") pod \"crc-debug-xfnmh\" (UID: \"05ff8b3c-ff37-4e62-9f0a-06a885a824b9\") " pod="openshift-must-gather-t68mk/crc-debug-xfnmh" Dec 06 06:36:39 crc kubenswrapper[4706]: I1206 06:36:39.410309 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcz2x\" (UniqueName: \"kubernetes.io/projected/05ff8b3c-ff37-4e62-9f0a-06a885a824b9-kube-api-access-fcz2x\") pod \"crc-debug-xfnmh\" (UID: \"05ff8b3c-ff37-4e62-9f0a-06a885a824b9\") " pod="openshift-must-gather-t68mk/crc-debug-xfnmh" Dec 06 06:36:39 crc kubenswrapper[4706]: I1206 06:36:39.411023 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/05ff8b3c-ff37-4e62-9f0a-06a885a824b9-host\") pod \"crc-debug-xfnmh\" (UID: \"05ff8b3c-ff37-4e62-9f0a-06a885a824b9\") " pod="openshift-must-gather-t68mk/crc-debug-xfnmh" Dec 06 06:36:39 crc kubenswrapper[4706]: I1206 06:36:39.429038 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fcz2x\" (UniqueName: \"kubernetes.io/projected/05ff8b3c-ff37-4e62-9f0a-06a885a824b9-kube-api-access-fcz2x\") pod \"crc-debug-xfnmh\" (UID: \"05ff8b3c-ff37-4e62-9f0a-06a885a824b9\") " pod="openshift-must-gather-t68mk/crc-debug-xfnmh" Dec 06 06:36:39 crc kubenswrapper[4706]: I1206 06:36:39.586461 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-t68mk/crc-debug-xfnmh" Dec 06 06:36:39 crc kubenswrapper[4706]: W1206 06:36:39.647648 4706 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod05ff8b3c_ff37_4e62_9f0a_06a885a824b9.slice/crio-77b9476cfd91569d204fdf7029ad27f2056dc4872c33a9a6a9990f3e1fe1a171 WatchSource:0}: Error finding container 77b9476cfd91569d204fdf7029ad27f2056dc4872c33a9a6a9990f3e1fe1a171: Status 404 returned error can't find the container with id 77b9476cfd91569d204fdf7029ad27f2056dc4872c33a9a6a9990f3e1fe1a171 Dec 06 06:36:40 crc kubenswrapper[4706]: I1206 06:36:40.334105 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t68mk/crc-debug-xfnmh" event={"ID":"05ff8b3c-ff37-4e62-9f0a-06a885a824b9","Type":"ContainerStarted","Data":"b9570aa19c6d3c854b0d71d08d1354b81edb3434927cb46927d11639dfa0b1e6"} Dec 06 06:36:40 crc kubenswrapper[4706]: I1206 06:36:40.334616 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t68mk/crc-debug-xfnmh" event={"ID":"05ff8b3c-ff37-4e62-9f0a-06a885a824b9","Type":"ContainerStarted","Data":"77b9476cfd91569d204fdf7029ad27f2056dc4872c33a9a6a9990f3e1fe1a171"} Dec 06 06:36:40 crc kubenswrapper[4706]: I1206 06:36:40.349358 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-t68mk/crc-debug-xfnmh" podStartSLOduration=1.349339625 podStartE2EDuration="1.349339625s" podCreationTimestamp="2025-12-06 06:36:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 06:36:40.346120498 +0000 UTC m=+4622.673944452" watchObservedRunningTime="2025-12-06 06:36:40.349339625 +0000 UTC m=+4622.677163589" Dec 06 06:36:42 crc kubenswrapper[4706]: I1206 06:36:42.036195 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:36:42 crc kubenswrapper[4706]: E1206 06:36:42.036914 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:36:54 crc kubenswrapper[4706]: I1206 06:36:54.035801 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:36:54 crc kubenswrapper[4706]: E1206 06:36:54.036580 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:37:06 crc kubenswrapper[4706]: I1206 06:37:06.035799 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:37:06 crc kubenswrapper[4706]: E1206 06:37:06.036537 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:37:13 crc kubenswrapper[4706]: I1206 06:37:13.595905 4706 generic.go:334] "Generic (PLEG): container finished" podID="05ff8b3c-ff37-4e62-9f0a-06a885a824b9" containerID="b9570aa19c6d3c854b0d71d08d1354b81edb3434927cb46927d11639dfa0b1e6" exitCode=0 Dec 06 06:37:13 crc kubenswrapper[4706]: I1206 06:37:13.595977 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t68mk/crc-debug-xfnmh" event={"ID":"05ff8b3c-ff37-4e62-9f0a-06a885a824b9","Type":"ContainerDied","Data":"b9570aa19c6d3c854b0d71d08d1354b81edb3434927cb46927d11639dfa0b1e6"} Dec 06 06:37:14 crc kubenswrapper[4706]: I1206 06:37:14.741253 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t68mk/crc-debug-xfnmh" Dec 06 06:37:14 crc kubenswrapper[4706]: I1206 06:37:14.788563 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-t68mk/crc-debug-xfnmh"] Dec 06 06:37:14 crc kubenswrapper[4706]: I1206 06:37:14.800111 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-t68mk/crc-debug-xfnmh"] Dec 06 06:37:14 crc kubenswrapper[4706]: I1206 06:37:14.889600 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/05ff8b3c-ff37-4e62-9f0a-06a885a824b9-host\") pod \"05ff8b3c-ff37-4e62-9f0a-06a885a824b9\" (UID: \"05ff8b3c-ff37-4e62-9f0a-06a885a824b9\") " Dec 06 06:37:14 crc kubenswrapper[4706]: I1206 06:37:14.889757 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/05ff8b3c-ff37-4e62-9f0a-06a885a824b9-host" (OuterVolumeSpecName: "host") pod "05ff8b3c-ff37-4e62-9f0a-06a885a824b9" (UID: "05ff8b3c-ff37-4e62-9f0a-06a885a824b9"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 06:37:14 crc kubenswrapper[4706]: I1206 06:37:14.890003 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcz2x\" (UniqueName: \"kubernetes.io/projected/05ff8b3c-ff37-4e62-9f0a-06a885a824b9-kube-api-access-fcz2x\") pod \"05ff8b3c-ff37-4e62-9f0a-06a885a824b9\" (UID: \"05ff8b3c-ff37-4e62-9f0a-06a885a824b9\") " Dec 06 06:37:14 crc kubenswrapper[4706]: I1206 06:37:14.890559 4706 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/05ff8b3c-ff37-4e62-9f0a-06a885a824b9-host\") on node \"crc\" DevicePath \"\"" Dec 06 06:37:14 crc kubenswrapper[4706]: I1206 06:37:14.894974 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/05ff8b3c-ff37-4e62-9f0a-06a885a824b9-kube-api-access-fcz2x" (OuterVolumeSpecName: "kube-api-access-fcz2x") pod "05ff8b3c-ff37-4e62-9f0a-06a885a824b9" (UID: "05ff8b3c-ff37-4e62-9f0a-06a885a824b9"). InnerVolumeSpecName "kube-api-access-fcz2x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:37:14 crc kubenswrapper[4706]: I1206 06:37:14.992603 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcz2x\" (UniqueName: \"kubernetes.io/projected/05ff8b3c-ff37-4e62-9f0a-06a885a824b9-kube-api-access-fcz2x\") on node \"crc\" DevicePath \"\"" Dec 06 06:37:15 crc kubenswrapper[4706]: I1206 06:37:15.614779 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="77b9476cfd91569d204fdf7029ad27f2056dc4872c33a9a6a9990f3e1fe1a171" Dec 06 06:37:15 crc kubenswrapper[4706]: I1206 06:37:15.614842 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t68mk/crc-debug-xfnmh" Dec 06 06:37:16 crc kubenswrapper[4706]: I1206 06:37:16.024740 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-t68mk/crc-debug-hmrvv"] Dec 06 06:37:16 crc kubenswrapper[4706]: E1206 06:37:16.025121 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="05ff8b3c-ff37-4e62-9f0a-06a885a824b9" containerName="container-00" Dec 06 06:37:16 crc kubenswrapper[4706]: I1206 06:37:16.025133 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="05ff8b3c-ff37-4e62-9f0a-06a885a824b9" containerName="container-00" Dec 06 06:37:16 crc kubenswrapper[4706]: I1206 06:37:16.025329 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="05ff8b3c-ff37-4e62-9f0a-06a885a824b9" containerName="container-00" Dec 06 06:37:16 crc kubenswrapper[4706]: I1206 06:37:16.025930 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t68mk/crc-debug-hmrvv" Dec 06 06:37:16 crc kubenswrapper[4706]: I1206 06:37:16.048508 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="05ff8b3c-ff37-4e62-9f0a-06a885a824b9" path="/var/lib/kubelet/pods/05ff8b3c-ff37-4e62-9f0a-06a885a824b9/volumes" Dec 06 06:37:16 crc kubenswrapper[4706]: I1206 06:37:16.112464 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqsz5\" (UniqueName: \"kubernetes.io/projected/509f9d5f-88c4-4255-8816-b2af75ead27a-kube-api-access-dqsz5\") pod \"crc-debug-hmrvv\" (UID: \"509f9d5f-88c4-4255-8816-b2af75ead27a\") " pod="openshift-must-gather-t68mk/crc-debug-hmrvv" Dec 06 06:37:16 crc kubenswrapper[4706]: I1206 06:37:16.112700 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/509f9d5f-88c4-4255-8816-b2af75ead27a-host\") pod \"crc-debug-hmrvv\" (UID: \"509f9d5f-88c4-4255-8816-b2af75ead27a\") " pod="openshift-must-gather-t68mk/crc-debug-hmrvv" Dec 06 06:37:16 crc kubenswrapper[4706]: I1206 06:37:16.214965 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqsz5\" (UniqueName: \"kubernetes.io/projected/509f9d5f-88c4-4255-8816-b2af75ead27a-kube-api-access-dqsz5\") pod \"crc-debug-hmrvv\" (UID: \"509f9d5f-88c4-4255-8816-b2af75ead27a\") " pod="openshift-must-gather-t68mk/crc-debug-hmrvv" Dec 06 06:37:16 crc kubenswrapper[4706]: I1206 06:37:16.215139 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/509f9d5f-88c4-4255-8816-b2af75ead27a-host\") pod \"crc-debug-hmrvv\" (UID: \"509f9d5f-88c4-4255-8816-b2af75ead27a\") " pod="openshift-must-gather-t68mk/crc-debug-hmrvv" Dec 06 06:37:16 crc kubenswrapper[4706]: I1206 
06:37:16.215293 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/509f9d5f-88c4-4255-8816-b2af75ead27a-host\") pod \"crc-debug-hmrvv\" (UID: \"509f9d5f-88c4-4255-8816-b2af75ead27a\") " pod="openshift-must-gather-t68mk/crc-debug-hmrvv" Dec 06 06:37:16 crc kubenswrapper[4706]: I1206 06:37:16.230720 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqsz5\" (UniqueName: \"kubernetes.io/projected/509f9d5f-88c4-4255-8816-b2af75ead27a-kube-api-access-dqsz5\") pod \"crc-debug-hmrvv\" (UID: \"509f9d5f-88c4-4255-8816-b2af75ead27a\") " pod="openshift-must-gather-t68mk/crc-debug-hmrvv" Dec 06 06:37:16 crc kubenswrapper[4706]: I1206 06:37:16.343958 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t68mk/crc-debug-hmrvv" Dec 06 06:37:16 crc kubenswrapper[4706]: I1206 06:37:16.627301 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t68mk/crc-debug-hmrvv" event={"ID":"509f9d5f-88c4-4255-8816-b2af75ead27a","Type":"ContainerStarted","Data":"75eacb46e68a6e88f4acd02fd9296892c5dc4ace5e7427fed0cddd83d66042b5"} Dec 06 06:37:16 crc kubenswrapper[4706]: I1206 06:37:16.627366 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t68mk/crc-debug-hmrvv" event={"ID":"509f9d5f-88c4-4255-8816-b2af75ead27a","Type":"ContainerStarted","Data":"7b96a1f9596d00aa8ada3b3acc73197d919c68990f428cc26c56ee6a54f2f488"} Dec 06 06:37:16 crc kubenswrapper[4706]: I1206 06:37:16.640321 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-t68mk/crc-debug-hmrvv" podStartSLOduration=0.640301088 podStartE2EDuration="640.301088ms" podCreationTimestamp="2025-12-06 06:37:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-12-06 06:37:16.637345298 +0000 UTC m=+4658.965169242" watchObservedRunningTime="2025-12-06 06:37:16.640301088 +0000 UTC m=+4658.968125032" Dec 06 06:37:17 crc kubenswrapper[4706]: I1206 06:37:17.640733 4706 generic.go:334] "Generic (PLEG): container finished" podID="509f9d5f-88c4-4255-8816-b2af75ead27a" containerID="75eacb46e68a6e88f4acd02fd9296892c5dc4ace5e7427fed0cddd83d66042b5" exitCode=0 Dec 06 06:37:17 crc kubenswrapper[4706]: I1206 06:37:17.640780 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t68mk/crc-debug-hmrvv" event={"ID":"509f9d5f-88c4-4255-8816-b2af75ead27a","Type":"ContainerDied","Data":"75eacb46e68a6e88f4acd02fd9296892c5dc4ace5e7427fed0cddd83d66042b5"} Dec 06 06:37:18 crc kubenswrapper[4706]: I1206 06:37:18.044241 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:37:18 crc kubenswrapper[4706]: E1206 06:37:18.044618 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:37:18 crc kubenswrapper[4706]: I1206 06:37:18.750291 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-t68mk/crc-debug-hmrvv" Dec 06 06:37:18 crc kubenswrapper[4706]: I1206 06:37:18.794100 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-t68mk/crc-debug-hmrvv"] Dec 06 06:37:18 crc kubenswrapper[4706]: I1206 06:37:18.804404 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-t68mk/crc-debug-hmrvv"] Dec 06 06:37:18 crc kubenswrapper[4706]: I1206 06:37:18.874636 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/509f9d5f-88c4-4255-8816-b2af75ead27a-host\") pod \"509f9d5f-88c4-4255-8816-b2af75ead27a\" (UID: \"509f9d5f-88c4-4255-8816-b2af75ead27a\") " Dec 06 06:37:18 crc kubenswrapper[4706]: I1206 06:37:18.874731 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/509f9d5f-88c4-4255-8816-b2af75ead27a-host" (OuterVolumeSpecName: "host") pod "509f9d5f-88c4-4255-8816-b2af75ead27a" (UID: "509f9d5f-88c4-4255-8816-b2af75ead27a"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 06:37:18 crc kubenswrapper[4706]: I1206 06:37:18.874760 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dqsz5\" (UniqueName: \"kubernetes.io/projected/509f9d5f-88c4-4255-8816-b2af75ead27a-kube-api-access-dqsz5\") pod \"509f9d5f-88c4-4255-8816-b2af75ead27a\" (UID: \"509f9d5f-88c4-4255-8816-b2af75ead27a\") " Dec 06 06:37:18 crc kubenswrapper[4706]: I1206 06:37:18.875272 4706 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/509f9d5f-88c4-4255-8816-b2af75ead27a-host\") on node \"crc\" DevicePath \"\"" Dec 06 06:37:18 crc kubenswrapper[4706]: I1206 06:37:18.880733 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/509f9d5f-88c4-4255-8816-b2af75ead27a-kube-api-access-dqsz5" (OuterVolumeSpecName: "kube-api-access-dqsz5") pod "509f9d5f-88c4-4255-8816-b2af75ead27a" (UID: "509f9d5f-88c4-4255-8816-b2af75ead27a"). InnerVolumeSpecName "kube-api-access-dqsz5". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:37:18 crc kubenswrapper[4706]: I1206 06:37:18.977022 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dqsz5\" (UniqueName: \"kubernetes.io/projected/509f9d5f-88c4-4255-8816-b2af75ead27a-kube-api-access-dqsz5\") on node \"crc\" DevicePath \"\"" Dec 06 06:37:19 crc kubenswrapper[4706]: I1206 06:37:19.668396 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b96a1f9596d00aa8ada3b3acc73197d919c68990f428cc26c56ee6a54f2f488" Dec 06 06:37:19 crc kubenswrapper[4706]: I1206 06:37:19.668472 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-t68mk/crc-debug-hmrvv" Dec 06 06:37:19 crc kubenswrapper[4706]: I1206 06:37:19.922001 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-t68mk/crc-debug-ktdkq"] Dec 06 06:37:19 crc kubenswrapper[4706]: E1206 06:37:19.923509 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="509f9d5f-88c4-4255-8816-b2af75ead27a" containerName="container-00" Dec 06 06:37:19 crc kubenswrapper[4706]: I1206 06:37:19.923538 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="509f9d5f-88c4-4255-8816-b2af75ead27a" containerName="container-00" Dec 06 06:37:19 crc kubenswrapper[4706]: I1206 06:37:19.923771 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="509f9d5f-88c4-4255-8816-b2af75ead27a" containerName="container-00" Dec 06 06:37:19 crc kubenswrapper[4706]: I1206 06:37:19.924568 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t68mk/crc-debug-ktdkq" Dec 06 06:37:20 crc kubenswrapper[4706]: I1206 06:37:20.046694 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="509f9d5f-88c4-4255-8816-b2af75ead27a" path="/var/lib/kubelet/pods/509f9d5f-88c4-4255-8816-b2af75ead27a/volumes" Dec 06 06:37:20 crc kubenswrapper[4706]: I1206 06:37:20.098063 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fqcn\" (UniqueName: \"kubernetes.io/projected/be436cb1-88ac-460e-9075-82d5f6397a7a-kube-api-access-7fqcn\") pod \"crc-debug-ktdkq\" (UID: \"be436cb1-88ac-460e-9075-82d5f6397a7a\") " pod="openshift-must-gather-t68mk/crc-debug-ktdkq" Dec 06 06:37:20 crc kubenswrapper[4706]: I1206 06:37:20.098238 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/be436cb1-88ac-460e-9075-82d5f6397a7a-host\") pod \"crc-debug-ktdkq\" (UID: \"be436cb1-88ac-460e-9075-82d5f6397a7a\") " pod="openshift-must-gather-t68mk/crc-debug-ktdkq" Dec 06 06:37:20 crc kubenswrapper[4706]: I1206 06:37:20.199534 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/be436cb1-88ac-460e-9075-82d5f6397a7a-host\") pod \"crc-debug-ktdkq\" (UID: \"be436cb1-88ac-460e-9075-82d5f6397a7a\") " pod="openshift-must-gather-t68mk/crc-debug-ktdkq" Dec 06 06:37:20 crc kubenswrapper[4706]: I1206 06:37:20.199622 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fqcn\" (UniqueName: \"kubernetes.io/projected/be436cb1-88ac-460e-9075-82d5f6397a7a-kube-api-access-7fqcn\") pod \"crc-debug-ktdkq\" (UID: \"be436cb1-88ac-460e-9075-82d5f6397a7a\") " pod="openshift-must-gather-t68mk/crc-debug-ktdkq" Dec 06 06:37:20 crc kubenswrapper[4706]: I1206 06:37:20.199811 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/be436cb1-88ac-460e-9075-82d5f6397a7a-host\") pod \"crc-debug-ktdkq\" (UID: \"be436cb1-88ac-460e-9075-82d5f6397a7a\") " pod="openshift-must-gather-t68mk/crc-debug-ktdkq" Dec 06 06:37:20 crc kubenswrapper[4706]: I1206 06:37:20.236113 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fqcn\" (UniqueName: \"kubernetes.io/projected/be436cb1-88ac-460e-9075-82d5f6397a7a-kube-api-access-7fqcn\") pod \"crc-debug-ktdkq\" (UID: \"be436cb1-88ac-460e-9075-82d5f6397a7a\") " 
pod="openshift-must-gather-t68mk/crc-debug-ktdkq" Dec 06 06:37:20 crc kubenswrapper[4706]: I1206 06:37:20.241635 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t68mk/crc-debug-ktdkq" Dec 06 06:37:20 crc kubenswrapper[4706]: I1206 06:37:20.679756 4706 generic.go:334] "Generic (PLEG): container finished" podID="be436cb1-88ac-460e-9075-82d5f6397a7a" containerID="24a681698eef2b27c96608ac276182169f327316b62a1cb8f8a94c31fafaa990" exitCode=0 Dec 06 06:37:20 crc kubenswrapper[4706]: I1206 06:37:20.679979 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t68mk/crc-debug-ktdkq" event={"ID":"be436cb1-88ac-460e-9075-82d5f6397a7a","Type":"ContainerDied","Data":"24a681698eef2b27c96608ac276182169f327316b62a1cb8f8a94c31fafaa990"} Dec 06 06:37:20 crc kubenswrapper[4706]: I1206 06:37:20.680107 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t68mk/crc-debug-ktdkq" event={"ID":"be436cb1-88ac-460e-9075-82d5f6397a7a","Type":"ContainerStarted","Data":"fe790a4058d10804136d86e697fd28b0849eeed565bc2c2cbe709ccbe1efed66"} Dec 06 06:37:20 crc kubenswrapper[4706]: I1206 06:37:20.725119 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-t68mk/crc-debug-ktdkq"] Dec 06 06:37:20 crc kubenswrapper[4706]: I1206 06:37:20.732247 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-t68mk/crc-debug-ktdkq"] Dec 06 06:37:22 crc kubenswrapper[4706]: I1206 06:37:22.315813 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t68mk/crc-debug-ktdkq" Dec 06 06:37:22 crc kubenswrapper[4706]: I1206 06:37:22.444194 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7fqcn\" (UniqueName: \"kubernetes.io/projected/be436cb1-88ac-460e-9075-82d5f6397a7a-kube-api-access-7fqcn\") pod \"be436cb1-88ac-460e-9075-82d5f6397a7a\" (UID: \"be436cb1-88ac-460e-9075-82d5f6397a7a\") " Dec 06 06:37:22 crc kubenswrapper[4706]: I1206 06:37:22.444428 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/be436cb1-88ac-460e-9075-82d5f6397a7a-host\") pod \"be436cb1-88ac-460e-9075-82d5f6397a7a\" (UID: \"be436cb1-88ac-460e-9075-82d5f6397a7a\") " Dec 06 06:37:22 crc kubenswrapper[4706]: I1206 06:37:22.444751 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/be436cb1-88ac-460e-9075-82d5f6397a7a-host" (OuterVolumeSpecName: "host") pod "be436cb1-88ac-460e-9075-82d5f6397a7a" (UID: "be436cb1-88ac-460e-9075-82d5f6397a7a"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Dec 06 06:37:22 crc kubenswrapper[4706]: I1206 06:37:22.450620 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be436cb1-88ac-460e-9075-82d5f6397a7a-kube-api-access-7fqcn" (OuterVolumeSpecName: "kube-api-access-7fqcn") pod "be436cb1-88ac-460e-9075-82d5f6397a7a" (UID: "be436cb1-88ac-460e-9075-82d5f6397a7a"). InnerVolumeSpecName "kube-api-access-7fqcn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:37:22 crc kubenswrapper[4706]: I1206 06:37:22.545979 4706 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/be436cb1-88ac-460e-9075-82d5f6397a7a-host\") on node \"crc\" DevicePath \"\"" Dec 06 06:37:22 crc kubenswrapper[4706]: I1206 06:37:22.546020 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7fqcn\" (UniqueName: \"kubernetes.io/projected/be436cb1-88ac-460e-9075-82d5f6397a7a-kube-api-access-7fqcn\") on node \"crc\" DevicePath \"\"" Dec 06 06:37:22 crc kubenswrapper[4706]: I1206 06:37:22.698650 4706 scope.go:117] "RemoveContainer" containerID="24a681698eef2b27c96608ac276182169f327316b62a1cb8f8a94c31fafaa990" Dec 06 06:37:22 crc kubenswrapper[4706]: I1206 06:37:22.698692 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-t68mk/crc-debug-ktdkq" Dec 06 06:37:24 crc kubenswrapper[4706]: I1206 06:37:24.046531 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be436cb1-88ac-460e-9075-82d5f6397a7a" path="/var/lib/kubelet/pods/be436cb1-88ac-460e-9075-82d5f6397a7a/volumes" Dec 06 06:37:33 crc kubenswrapper[4706]: I1206 06:37:33.061403 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:37:33 crc kubenswrapper[4706]: E1206 06:37:33.062433 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:37:45 crc kubenswrapper[4706]: I1206 06:37:45.502357 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5d57ffb9bb-t86s7_fb031ade-7dae-40f8-a748-8842d00f6a37/barbican-api/0.log" Dec 06 06:37:45 crc kubenswrapper[4706]: I1206 06:37:45.678630 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5d57ffb9bb-t86s7_fb031ade-7dae-40f8-a748-8842d00f6a37/barbican-api-log/0.log" Dec 06 06:37:45 crc kubenswrapper[4706]: I1206 06:37:45.754865 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5985b9fc68-gt5hx_99fd71cd-f273-4e5f-91e1-2816f523b9ce/barbican-keystone-listener/0.log" Dec 06 06:37:45 crc kubenswrapper[4706]: I1206 06:37:45.787420 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-5985b9fc68-gt5hx_99fd71cd-f273-4e5f-91e1-2816f523b9ce/barbican-keystone-listener-log/0.log" Dec 06 06:37:45 crc kubenswrapper[4706]: I1206 06:37:45.941675 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-99698bc47-f5twk_7ea78fc3-49cb-46cb-a450-c3c0990135fb/barbican-worker/0.log" Dec 06 06:37:45 crc kubenswrapper[4706]: I1206 06:37:45.947598 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-99698bc47-f5twk_7ea78fc3-49cb-46cb-a450-c3c0990135fb/barbican-worker-log/0.log" Dec 06 06:37:46 crc kubenswrapper[4706]: I1206 06:37:46.036619 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:37:46 crc kubenswrapper[4706]: E1206 06:37:46.036891 4706 
Dec 06 06:37:46 crc kubenswrapper[4706]: E1206 06:37:46.036891 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81"
Dec 06 06:37:46 crc kubenswrapper[4706]: I1206 06:37:46.073096 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-jf7hv_ab55260b-0613-4be9-b0e2-e1470cdb018d/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 06 06:37:46 crc kubenswrapper[4706]: I1206 06:37:46.164136 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_b3b0627f-70db-4eb0-8d16-c93648772685/ceilometer-central-agent/0.log"
Dec 06 06:37:46 crc kubenswrapper[4706]: I1206 06:37:46.282444 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_b3b0627f-70db-4eb0-8d16-c93648772685/ceilometer-notification-agent/0.log"
Dec 06 06:37:46 crc kubenswrapper[4706]: I1206 06:37:46.290648 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_b3b0627f-70db-4eb0-8d16-c93648772685/proxy-httpd/0.log"
Dec 06 06:37:46 crc kubenswrapper[4706]: I1206 06:37:46.377381 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_b3b0627f-70db-4eb0-8d16-c93648772685/sg-core/0.log"
Dec 06 06:37:46 crc kubenswrapper[4706]: I1206 06:37:46.485479 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_3631398b-6bec-44d1-bf3b-19f8e8114c5c/cinder-api-log/0.log"
Dec 06 06:37:46 crc kubenswrapper[4706]: I1206 06:37:46.529427 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_3631398b-6bec-44d1-bf3b-19f8e8114c5c/cinder-api/0.log"
Dec 06 06:37:46 crc kubenswrapper[4706]: I1206 06:37:46.669457 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_9823a9c2-7e13-4c23-a9ea-af6e03c32773/cinder-scheduler/0.log"
Dec 06 06:37:46 crc kubenswrapper[4706]: I1206 06:37:46.754929 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_9823a9c2-7e13-4c23-a9ea-af6e03c32773/probe/0.log"
Dec 06 06:37:46 crc kubenswrapper[4706]: I1206 06:37:46.896436 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-gqlm2_3dc977db-985f-4d5a-8735-0c417c7be72c/configure-network-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 06 06:37:46 crc kubenswrapper[4706]: I1206 06:37:46.958943 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-gdwj9_a71e1253-a40e-4b2b-b911-c15a88da2be5/configure-os-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 06 06:37:47 crc kubenswrapper[4706]: I1206 06:37:47.316440 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-cb6ffcf87-5dz8j_e7068fc5-ddf3-4a32-bf1a-803684a95dd3/init/0.log"
Dec 06 06:37:47 crc kubenswrapper[4706]: I1206 06:37:47.512023 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-cb6ffcf87-5dz8j_e7068fc5-ddf3-4a32-bf1a-803684a95dd3/init/0.log"
Dec 06 06:37:47 crc kubenswrapper[4706]: I1206 06:37:47.525479 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-cb6ffcf87-5dz8j_e7068fc5-ddf3-4a32-bf1a-803684a95dd3/dnsmasq-dns/0.log"
Dec 06 06:37:47 crc kubenswrapper[4706]: I1206 06:37:47.548845 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-7pb7x_582f8518-3c87-496d-b057-b2f66658a731/download-cache-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 06 06:37:47 crc kubenswrapper[4706]: I1206 06:37:47.720637 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_c50f78da-9727-4908-ba76-4a3dbc4455c7/glance-log/0.log"
Dec 06 06:37:47 crc kubenswrapper[4706]: I1206 06:37:47.732058 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_c50f78da-9727-4908-ba76-4a3dbc4455c7/glance-httpd/0.log"
Dec 06 06:37:47 crc kubenswrapper[4706]: I1206 06:37:47.929397 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_fdfe9ea0-e897-4071-9b1c-dcdd908b549d/glance-httpd/0.log"
Dec 06 06:37:47 crc kubenswrapper[4706]: I1206 06:37:47.932946 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_fdfe9ea0-e897-4071-9b1c-dcdd908b549d/glance-log/0.log"
Dec 06 06:37:48 crc kubenswrapper[4706]: I1206 06:37:48.091465 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-8f474c4b8-xgvj4_8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f/horizon/0.log"
Dec 06 06:37:48 crc kubenswrapper[4706]: I1206 06:37:48.354570 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-5hrbg_cc1a17c8-f209-4fb0-9fd5-d17086f90eba/install-certs-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 06 06:37:48 crc kubenswrapper[4706]: I1206 06:37:48.506797 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-cpbpm_82ebe200-9dff-4f3b-8bf1-e1a6feee951c/install-os-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 06 06:37:48 crc kubenswrapper[4706]: I1206 06:37:48.559067 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-8f474c4b8-xgvj4_8fc4f0d3-7ebd-42d4-b0f3-cb0b5974fd0f/horizon-log/0.log"
Dec 06 06:37:48 crc kubenswrapper[4706]: I1206 06:37:48.784942 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-6c6f7f7c88-ptmf7_0cbad2bc-87d3-4f51-aed8-36d386af56eb/keystone-api/0.log"
Dec 06 06:37:48 crc kubenswrapper[4706]: I1206 06:37:48.795866 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29416681-gqk5x_32439274-bc88-4aa9-b040-98212cda2b38/keystone-cron/0.log"
Dec 06 06:37:48 crc kubenswrapper[4706]: I1206 06:37:48.948469 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_66632781-9905-4f3f-8945-92ca177cf2bc/kube-state-metrics/0.log"
Dec 06 06:37:49 crc kubenswrapper[4706]: I1206 06:37:49.105302 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-6nsmb_5620e36a-01d5-4282-ad0c-a3e96dc38329/libvirt-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 06 06:37:49 crc kubenswrapper[4706]: I1206 06:37:49.369096 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5c45f4d87f-7sd44_d8a2aaf5-7417-43c4-9562-2df330329adf/neutron-api/0.log"
Dec 06 06:37:49 crc kubenswrapper[4706]: I1206 06:37:49.398988 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-5c45f4d87f-7sd44_d8a2aaf5-7417-43c4-9562-2df330329adf/neutron-httpd/0.log"
Dec 06 06:37:49 crc kubenswrapper[4706]: I1206 06:37:49.520164 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-nk4zx_d67f85a9-c64e-42f0-b686-bfb179dccc76/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 06 06:37:50 crc kubenswrapper[4706]: I1206 06:37:50.053791 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_085d0127-557c-49a2-80f4-2a86fed685cc/nova-api-log/0.log"
Dec 06 06:37:50 crc kubenswrapper[4706]: I1206 06:37:50.126192 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_635ff13a-9863-4ae2-84df-78df1c359b9e/nova-cell0-conductor-conductor/0.log"
Dec 06 06:37:50 crc kubenswrapper[4706]: I1206 06:37:50.440877 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_30a8debc-3590-46cb-9042-5cf8fe5a87d6/nova-cell1-conductor-conductor/0.log"
Dec 06 06:37:50 crc kubenswrapper[4706]: I1206 06:37:50.533462 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_085d0127-557c-49a2-80f4-2a86fed685cc/nova-api-api/0.log"
Dec 06 06:37:50 crc kubenswrapper[4706]: I1206 06:37:50.538090 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_b4142f86-6823-4e49-9a0e-564cdf8d043b/nova-cell1-novncproxy-novncproxy/0.log"
Dec 06 06:37:50 crc kubenswrapper[4706]: I1206 06:37:50.715485 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-kfg84_c4a06494-e4f9-427e-b7e2-dad0c843d44a/nova-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 06 06:37:50 crc kubenswrapper[4706]: I1206 06:37:50.822408 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_784eb2e8-d56e-4523-86cf-b67f953db54d/nova-metadata-log/0.log"
Dec 06 06:37:51 crc kubenswrapper[4706]: I1206 06:37:51.165666 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_08955916-6689-445e-830d-6fbfe9a2f460/mysql-bootstrap/0.log"
Dec 06 06:37:51 crc kubenswrapper[4706]: I1206 06:37:51.206426 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_24bb9983-5fec-49b8-9cff-cb2c111af5b9/nova-scheduler-scheduler/0.log"
Dec 06 06:37:51 crc kubenswrapper[4706]: I1206 06:37:51.361806 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_08955916-6689-445e-830d-6fbfe9a2f460/mysql-bootstrap/0.log"
Dec 06 06:37:51 crc kubenswrapper[4706]: I1206 06:37:51.366661 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_08955916-6689-445e-830d-6fbfe9a2f460/galera/0.log"
Dec 06 06:37:51 crc kubenswrapper[4706]: I1206 06:37:51.589889 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_74e1bb57-a746-472b-a3b1-ffb875c658e4/mysql-bootstrap/0.log"
Dec 06 06:37:51 crc kubenswrapper[4706]: I1206 06:37:51.776060 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_74e1bb57-a746-472b-a3b1-ffb875c658e4/mysql-bootstrap/0.log"
Dec 06 06:37:51 crc kubenswrapper[4706]: I1206 06:37:51.823481 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_74e1bb57-a746-472b-a3b1-ffb875c658e4/galera/0.log"
Dec 06 06:37:51 crc kubenswrapper[4706]: I1206 06:37:51.975258 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_2d5b5a38-b853-47de-ada1-1d7c240e84e4/openstackclient/0.log"
Dec 06 06:37:52 crc kubenswrapper[4706]: I1206 06:37:52.056413 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-cbrg2_cde7e1a3-dd72-47aa-a0b5-117bc2c53885/ovn-controller/0.log"
Dec 06 06:37:52 crc kubenswrapper[4706]: I1206 06:37:52.235320 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-5wn4n_bc140eba-adb0-407f-8472-1270d4fc5263/openstack-network-exporter/0.log"
Dec 06 06:37:52 crc kubenswrapper[4706]: I1206 06:37:52.313547 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_784eb2e8-d56e-4523-86cf-b67f953db54d/nova-metadata-metadata/0.log"
Dec 06 06:37:52 crc kubenswrapper[4706]: I1206 06:37:52.440006 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-cj4kx_cbdbd121-5030-4488-9425-7548fb291906/ovsdb-server-init/0.log"
Dec 06 06:37:52 crc kubenswrapper[4706]: I1206 06:37:52.628503 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-cj4kx_cbdbd121-5030-4488-9425-7548fb291906/ovsdb-server-init/0.log"
Dec 06 06:37:52 crc kubenswrapper[4706]: I1206 06:37:52.689954 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-cj4kx_cbdbd121-5030-4488-9425-7548fb291906/ovs-vswitchd/0.log"
Dec 06 06:37:52 crc kubenswrapper[4706]: I1206 06:37:52.702930 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-cj4kx_cbdbd121-5030-4488-9425-7548fb291906/ovsdb-server/0.log"
Dec 06 06:37:52 crc kubenswrapper[4706]: I1206 06:37:52.874504 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-wb66l_24cc16ad-5e43-4d54-bdf8-69d4f319907c/ovn-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 06 06:37:52 crc kubenswrapper[4706]: I1206 06:37:52.967018 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_0d7d6b1e-41f4-4140-a752-bcf110cf3bd5/openstack-network-exporter/0.log"
Dec 06 06:37:52 crc kubenswrapper[4706]: I1206 06:37:52.996253 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_0d7d6b1e-41f4-4140-a752-bcf110cf3bd5/ovn-northd/0.log"
Dec 06 06:37:53 crc kubenswrapper[4706]: I1206 06:37:53.165550 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_c330d787-77c8-4014-85a5-7d1bcf73836b/openstack-network-exporter/0.log"
Dec 06 06:37:53 crc kubenswrapper[4706]: I1206 06:37:53.166310 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_c330d787-77c8-4014-85a5-7d1bcf73836b/ovsdbserver-nb/0.log"
Dec 06 06:37:53 crc kubenswrapper[4706]: I1206 06:37:53.384837 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad/openstack-network-exporter/0.log"
Dec 06 06:37:53 crc kubenswrapper[4706]: I1206 06:37:53.390613 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_51b1b8ca-3f0f-47f0-bfad-860eaa7f19ad/ovsdbserver-sb/0.log"
Dec 06 06:37:53 crc kubenswrapper[4706]: I1206 06:37:53.553528 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-789868f976-vz5nh_8507b27e-a504-499e-bfea-e8c0397ff528/placement-api/0.log"
Dec 06 06:37:53 crc kubenswrapper[4706]: I1206 06:37:53.681414 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-789868f976-vz5nh_8507b27e-a504-499e-bfea-e8c0397ff528/placement-log/0.log"
Dec 06 06:37:53 crc kubenswrapper[4706]: I1206 06:37:53.685369 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_965d89e8-6db9-49d7-b516-ee4039b050eb/setup-container/0.log"
Dec 06 06:37:53 crc kubenswrapper[4706]: I1206 06:37:53.948415 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_965d89e8-6db9-49d7-b516-ee4039b050eb/rabbitmq/0.log"
Dec 06 06:37:53 crc kubenswrapper[4706]: I1206 06:37:53.970317 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_6be686b8-8844-4721-8b68-cd8b4d338517/setup-container/0.log"
Dec 06 06:37:53 crc kubenswrapper[4706]: I1206 06:37:53.988088 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_965d89e8-6db9-49d7-b516-ee4039b050eb/setup-container/0.log"
Dec 06 06:37:54 crc kubenswrapper[4706]: I1206 06:37:54.170317 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_6be686b8-8844-4721-8b68-cd8b4d338517/setup-container/0.log"
Dec 06 06:37:54 crc kubenswrapper[4706]: I1206 06:37:54.377441 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_6be686b8-8844-4721-8b68-cd8b4d338517/rabbitmq/0.log"
Dec 06 06:37:54 crc kubenswrapper[4706]: I1206 06:37:54.390944 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-w7vg5_5d6e830f-730f-43e2-8218-e247e8a663df/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 06 06:37:54 crc kubenswrapper[4706]: I1206 06:37:54.503954 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-dpmdb_0df1eee4-ea9f-4409-b17c-8b6b37985814/redhat-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 06 06:37:54 crc kubenswrapper[4706]: I1206 06:37:54.609886 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-qnc75_7051aff0-e824-43eb-a501-3c02108f96ee/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 06 06:37:54 crc kubenswrapper[4706]: I1206 06:37:54.751294 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-6bt4j_70676b1a-d6a7-4b05-b15a-fa2661a1a77b/run-os-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 06 06:37:54 crc kubenswrapper[4706]: I1206 06:37:54.969521 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-gf4l9_fe026aef-fa96-451a-b38d-de4406116ea7/ssh-known-hosts-edpm-deployment/0.log"
Dec 06 06:37:55 crc kubenswrapper[4706]: I1206 06:37:55.057203 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-7f666db4c-wsc2b_38ce5378-a514-4454-8f74-73226df682e6/proxy-server/0.log"
Dec 06 06:37:55 crc kubenswrapper[4706]: I1206 06:37:55.197313 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-7f666db4c-wsc2b_38ce5378-a514-4454-8f74-73226df682e6/proxy-httpd/0.log"
Dec 06 06:37:55 crc kubenswrapper[4706]: I1206 06:37:55.227566 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-9pw6t_abd1400e-de80-48fe-bad4-3e3c3af98355/swift-ring-rebalance/0.log"
Dec 06 06:37:55 crc kubenswrapper[4706]: I1206 06:37:55.388274 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/account-auditor/0.log"
Dec 06 06:37:55 crc kubenswrapper[4706]: I1206 06:37:55.444107 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/account-reaper/0.log"
Dec 06 06:37:55 crc kubenswrapper[4706]: I1206 06:37:55.479305 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/account-replicator/0.log"
Dec 06 06:37:55 crc kubenswrapper[4706]: I1206 06:37:55.531528 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/account-server/0.log"
Dec 06 06:37:55 crc kubenswrapper[4706]: I1206 06:37:55.690665 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/container-replicator/0.log"
Dec 06 06:37:55 crc kubenswrapper[4706]: I1206 06:37:55.695691 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/container-auditor/0.log"
Dec 06 06:37:55 crc kubenswrapper[4706]: I1206 06:37:55.748623 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/container-updater/0.log"
Dec 06 06:37:55 crc kubenswrapper[4706]: I1206 06:37:55.774854 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/container-server/0.log"
Dec 06 06:37:55 crc kubenswrapper[4706]: I1206 06:37:55.918055 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/object-expirer/0.log"
Dec 06 06:37:55 crc kubenswrapper[4706]: I1206 06:37:55.965458 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/object-auditor/0.log"
Dec 06 06:37:55 crc kubenswrapper[4706]: I1206 06:37:55.982833 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/object-server/0.log"
Dec 06 06:37:56 crc kubenswrapper[4706]: I1206 06:37:56.020585 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/object-replicator/0.log"
Dec 06 06:37:56 crc kubenswrapper[4706]: I1206 06:37:56.443477 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/object-updater/0.log"
Dec 06 06:37:56 crc kubenswrapper[4706]: I1206 06:37:56.483825 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/swift-recon-cron/0.log"
Dec 06 06:37:56 crc kubenswrapper[4706]: I1206 06:37:56.493516 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_91f74906-ec70-4b0c-a657-d075d18f488b/rsync/0.log"
Dec 06 06:37:56 crc kubenswrapper[4706]: I1206 06:37:56.870937 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-7ddtb_19fbc54f-2695-4d41-9221-c5d2731510c1/telemetry-edpm-deployment-openstack-edpm-ipam/0.log"
Dec 06 06:37:56 crc kubenswrapper[4706]: I1206 06:37:56.875129 4706 log.go:25] "Finished parsing
log file" path="/var/log/pods/openstack_tempest-tests-tempest_53ac9b54-4c61-4101-96d0-c247c09c0cdd/tempest-tests-tempest-tests-runner/0.log" Dec 06 06:37:57 crc kubenswrapper[4706]: I1206 06:37:57.035974 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:37:57 crc kubenswrapper[4706]: E1206 06:37:57.036332 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:37:57 crc kubenswrapper[4706]: I1206 06:37:57.083360 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_5711989a-45c2-4c7f-b728-3d5c0eb851a6/test-operator-logs-container/0.log" Dec 06 06:37:57 crc kubenswrapper[4706]: I1206 06:37:57.141552 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-jj2hq_6c4f877c-27aa-40eb-b5ff-2968f748a978/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Dec 06 06:38:06 crc kubenswrapper[4706]: I1206 06:38:06.774684 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_6dce87fa-1a80-4b4e-ac0d-5205d06a9ccf/memcached/0.log" Dec 06 06:38:12 crc kubenswrapper[4706]: I1206 06:38:12.035821 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:38:12 crc kubenswrapper[4706]: E1206 06:38:12.036495 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:38:24 crc kubenswrapper[4706]: I1206 06:38:24.769817 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv_2add93a7-b496-4008-b764-b43a05be4967/util/0.log" Dec 06 06:38:25 crc kubenswrapper[4706]: I1206 06:38:25.002074 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv_2add93a7-b496-4008-b764-b43a05be4967/pull/0.log" Dec 06 06:38:25 crc kubenswrapper[4706]: I1206 06:38:25.011974 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv_2add93a7-b496-4008-b764-b43a05be4967/pull/0.log" Dec 06 06:38:25 crc kubenswrapper[4706]: I1206 06:38:25.016195 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv_2add93a7-b496-4008-b764-b43a05be4967/util/0.log" Dec 06 06:38:25 crc kubenswrapper[4706]: I1206 06:38:25.213339 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv_2add93a7-b496-4008-b764-b43a05be4967/util/0.log" Dec 06 
06:38:25 crc kubenswrapper[4706]: I1206 06:38:25.253557 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv_2add93a7-b496-4008-b764-b43a05be4967/pull/0.log" Dec 06 06:38:25 crc kubenswrapper[4706]: I1206 06:38:25.258167 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_01f2cf0099a01d783cd83d75d1567afafc81c3e54953f2f84acc93f279bqfbv_2add93a7-b496-4008-b764-b43a05be4967/extract/0.log" Dec 06 06:38:25 crc kubenswrapper[4706]: I1206 06:38:25.430336 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-mpkjv_31b78248-5727-4a30-95ab-d75acc5a752b/kube-rbac-proxy/0.log" Dec 06 06:38:25 crc kubenswrapper[4706]: I1206 06:38:25.459798 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-mpkjv_31b78248-5727-4a30-95ab-d75acc5a752b/manager/0.log" Dec 06 06:38:25 crc kubenswrapper[4706]: I1206 06:38:25.496911 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-859b6ccc6-msm2n_9e547dc3-41db-48ab-b791-885c0f98f4c8/kube-rbac-proxy/0.log" Dec 06 06:38:25 crc kubenswrapper[4706]: I1206 06:38:25.614721 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-859b6ccc6-msm2n_9e547dc3-41db-48ab-b791-885c0f98f4c8/manager/0.log" Dec 06 06:38:25 crc kubenswrapper[4706]: I1206 06:38:25.666249 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-78b4bc895b-wzlpz_74049eb3-6721-4234-80cd-01b530d2d9e5/kube-rbac-proxy/0.log" Dec 06 06:38:25 crc kubenswrapper[4706]: I1206 06:38:25.699298 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-78b4bc895b-wzlpz_74049eb3-6721-4234-80cd-01b530d2d9e5/manager/0.log" Dec 06 06:38:25 crc kubenswrapper[4706]: I1206 06:38:25.836489 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-77987cd8cd-vm2sj_de139c22-08fa-4b45-abda-af9394c16eac/kube-rbac-proxy/0.log" Dec 06 06:38:25 crc kubenswrapper[4706]: I1206 06:38:25.958204 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-77987cd8cd-vm2sj_de139c22-08fa-4b45-abda-af9394c16eac/manager/0.log" Dec 06 06:38:26 crc kubenswrapper[4706]: I1206 06:38:26.026032 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-qr75r_646d8bbb-f505-42f9-a23d-15b999c5acce/kube-rbac-proxy/0.log" Dec 06 06:38:26 crc kubenswrapper[4706]: I1206 06:38:26.029033 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-qr75r_646d8bbb-f505-42f9-a23d-15b999c5acce/manager/0.log" Dec 06 06:38:26 crc kubenswrapper[4706]: I1206 06:38:26.035862 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:38:26 crc kubenswrapper[4706]: E1206 06:38:26.036153 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:38:26 crc kubenswrapper[4706]: I1206 06:38:26.146234 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-fcp7z_b6524ab6-7d15-4cf4-b3b2-dc9f0d014930/kube-rbac-proxy/0.log" Dec 06 06:38:26 crc kubenswrapper[4706]: I1206 06:38:26.257658 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-fcp7z_b6524ab6-7d15-4cf4-b3b2-dc9f0d014930/manager/0.log" Dec 06 06:38:26 crc kubenswrapper[4706]: I1206 06:38:26.313604 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-x7wwl_0e17be2a-d936-4d91-862a-b92014212bf6/kube-rbac-proxy/0.log" Dec 06 06:38:26 crc kubenswrapper[4706]: I1206 06:38:26.508565 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-x7wwl_0e17be2a-d936-4d91-862a-b92014212bf6/manager/0.log" Dec 06 06:38:26 crc kubenswrapper[4706]: I1206 06:38:26.547659 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-6c548fd776-jspvh_eacc98a4-22bf-4a38-8de0-2bf6fd395572/kube-rbac-proxy/0.log" Dec 06 06:38:26 crc kubenswrapper[4706]: I1206 06:38:26.551293 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-6c548fd776-jspvh_eacc98a4-22bf-4a38-8de0-2bf6fd395572/manager/0.log" Dec 06 06:38:26 crc kubenswrapper[4706]: I1206 06:38:26.703010 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7765d96ddf-jvwv2_34163fc1-16c7-4942-9eda-5afb77180d00/kube-rbac-proxy/0.log" Dec 06 06:38:26 crc kubenswrapper[4706]: I1206 06:38:26.798164 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7765d96ddf-jvwv2_34163fc1-16c7-4942-9eda-5afb77180d00/manager/0.log" Dec 06 06:38:26 crc kubenswrapper[4706]: I1206 06:38:26.883000 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-7c79b5df47-xctf2_b67589f2-8ee8-43a3-aaf9-e1767c0a75c5/kube-rbac-proxy/0.log" Dec 06 06:38:26 crc kubenswrapper[4706]: I1206 06:38:26.906235 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-7c79b5df47-xctf2_b67589f2-8ee8-43a3-aaf9-e1767c0a75c5/manager/0.log" Dec 06 06:38:26 crc kubenswrapper[4706]: I1206 06:38:26.979890 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-56bbcc9d85-nhzq9_5f25d928-9f7a-4d1b-b1bb-abc58dad2080/kube-rbac-proxy/0.log" Dec 06 06:38:27 crc kubenswrapper[4706]: I1206 06:38:27.129453 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-56bbcc9d85-nhzq9_5f25d928-9f7a-4d1b-b1bb-abc58dad2080/manager/0.log" Dec 06 06:38:27 crc kubenswrapper[4706]: I1206 06:38:27.184849 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-bkvhv_d28af7d8-b64b-48f1-9ac1-7f1cfc361751/kube-rbac-proxy/0.log" Dec 06 
06:38:27 crc kubenswrapper[4706]: I1206 06:38:27.233512 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-bkvhv_d28af7d8-b64b-48f1-9ac1-7f1cfc361751/manager/0.log" Dec 06 06:38:27 crc kubenswrapper[4706]: I1206 06:38:27.378243 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-qfpfj_0928e1f4-7912-465f-a991-9d0dda0a42d1/kube-rbac-proxy/0.log" Dec 06 06:38:27 crc kubenswrapper[4706]: I1206 06:38:27.438365 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-697bc559fc-qfpfj_0928e1f4-7912-465f-a991-9d0dda0a42d1/manager/0.log" Dec 06 06:38:27 crc kubenswrapper[4706]: I1206 06:38:27.560259 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-k5hqn_b980759b-88cf-47ee-b7b0-12ebaddba6cd/manager/0.log" Dec 06 06:38:27 crc kubenswrapper[4706]: I1206 06:38:27.576245 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-998648c74-k5hqn_b980759b-88cf-47ee-b7b0-12ebaddba6cd/kube-rbac-proxy/0.log" Dec 06 06:38:27 crc kubenswrapper[4706]: I1206 06:38:27.678087 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-64bc77cfd455bk5_09479c44-e706-4f72-a1f3-6b71d4b29f0b/kube-rbac-proxy/0.log" Dec 06 06:38:27 crc kubenswrapper[4706]: I1206 06:38:27.761449 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-64bc77cfd455bk5_09479c44-e706-4f72-a1f3-6b71d4b29f0b/manager/0.log" Dec 06 06:38:28 crc kubenswrapper[4706]: I1206 06:38:28.096545 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-8stsm_c7986937-a648-4cc0-89ae-e718dcccffad/registry-server/0.log" Dec 06 06:38:28 crc kubenswrapper[4706]: I1206 06:38:28.241827 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5964599cfc-xxv5r_235972bf-6d17-4167-b41f-98483ea3f1ba/operator/0.log" Dec 06 06:38:28 crc kubenswrapper[4706]: I1206 06:38:28.349300 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-q9dk8_9914167a-34c0-42fc-ac0c-af6f866b437f/kube-rbac-proxy/0.log" Dec 06 06:38:28 crc kubenswrapper[4706]: I1206 06:38:28.551914 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-b6456fdb6-q9dk8_9914167a-34c0-42fc-ac0c-af6f866b437f/manager/0.log" Dec 06 06:38:28 crc kubenswrapper[4706]: I1206 06:38:28.668420 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-gz7v6_47a5741f-61c5-4de3-b020-50c25f0570f2/kube-rbac-proxy/0.log" Dec 06 06:38:28 crc kubenswrapper[4706]: I1206 06:38:28.716801 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-gz7v6_47a5741f-61c5-4de3-b020-50c25f0570f2/manager/0.log" Dec 06 06:38:28 crc kubenswrapper[4706]: I1206 06:38:28.860465 4706 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-pmfhv_2d6df005-5a24-47f7-a1a2-a30e6b8ab9fb/operator/0.log" Dec 06 06:38:28 crc kubenswrapper[4706]: I1206 06:38:28.948339 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f8c65bbfc-jc6r8_73d3329e-7a93-4d32-b7ba-0d5d6b468432/kube-rbac-proxy/0.log" Dec 06 06:38:28 crc kubenswrapper[4706]: I1206 06:38:28.959038 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-7f6f47b7b7-lmnn4_36973f56-f6d5-4a12-b86e-4ad7bcb3df6f/manager/0.log" Dec 06 06:38:29 crc kubenswrapper[4706]: I1206 06:38:29.025467 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f8c65bbfc-jc6r8_73d3329e-7a93-4d32-b7ba-0d5d6b468432/manager/0.log" Dec 06 06:38:29 crc kubenswrapper[4706]: I1206 06:38:29.096236 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-lpjp5_bfd8649f-6345-40be-9193-e80b2ce0c1dc/kube-rbac-proxy/0.log" Dec 06 06:38:29 crc kubenswrapper[4706]: I1206 06:38:29.167418 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-lpjp5_bfd8649f-6345-40be-9193-e80b2ce0c1dc/manager/0.log" Dec 06 06:38:29 crc kubenswrapper[4706]: I1206 06:38:29.259656 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-qmnhr_ff8a3a6e-0623-417c-8e02-f16f34e3bfe9/kube-rbac-proxy/0.log" Dec 06 06:38:29 crc kubenswrapper[4706]: I1206 06:38:29.296522 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-5854674fcc-qmnhr_ff8a3a6e-0623-417c-8e02-f16f34e3bfe9/manager/0.log" Dec 06 06:38:29 crc kubenswrapper[4706]: I1206 06:38:29.381826 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-769dc69bc-tx6k9_1d93b83c-6e45-44bf-b9b1-d6163c85d6b1/kube-rbac-proxy/0.log" Dec 06 06:38:29 crc kubenswrapper[4706]: I1206 06:38:29.416496 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-769dc69bc-tx6k9_1d93b83c-6e45-44bf-b9b1-d6163c85d6b1/manager/0.log" Dec 06 06:38:37 crc kubenswrapper[4706]: I1206 06:38:37.036547 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:38:37 crc kubenswrapper[4706]: E1206 06:38:37.038051 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:38:46 crc kubenswrapper[4706]: I1206 06:38:46.666300 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-wdsds_a417f08a-e64f-4a02-abb3-bee2049eb2e7/control-plane-machine-set-operator/0.log" Dec 06 06:38:46 crc kubenswrapper[4706]: I1206 06:38:46.796797 4706 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-5g2s4_4230f0fb-f05e-4ae6-9755-db33865a6c33/kube-rbac-proxy/0.log" Dec 06 06:38:46 crc kubenswrapper[4706]: I1206 06:38:46.830249 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-5g2s4_4230f0fb-f05e-4ae6-9755-db33865a6c33/machine-api-operator/0.log" Dec 06 06:38:51 crc kubenswrapper[4706]: I1206 06:38:51.036491 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:38:51 crc kubenswrapper[4706]: E1206 06:38:51.037386 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:38:52 crc kubenswrapper[4706]: I1206 06:38:52.147561 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-n4tqj"] Dec 06 06:38:52 crc kubenswrapper[4706]: E1206 06:38:52.148377 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be436cb1-88ac-460e-9075-82d5f6397a7a" containerName="container-00" Dec 06 06:38:52 crc kubenswrapper[4706]: I1206 06:38:52.148393 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="be436cb1-88ac-460e-9075-82d5f6397a7a" containerName="container-00" Dec 06 06:38:52 crc kubenswrapper[4706]: I1206 06:38:52.148623 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="be436cb1-88ac-460e-9075-82d5f6397a7a" containerName="container-00" Dec 06 06:38:52 crc kubenswrapper[4706]: I1206 06:38:52.150002 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n4tqj" Dec 06 06:38:52 crc kubenswrapper[4706]: I1206 06:38:52.172705 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n4tqj"] Dec 06 06:38:52 crc kubenswrapper[4706]: I1206 06:38:52.335014 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8154a3c6-3bf3-404f-a12a-63640d4fea1d-catalog-content\") pod \"redhat-marketplace-n4tqj\" (UID: \"8154a3c6-3bf3-404f-a12a-63640d4fea1d\") " pod="openshift-marketplace/redhat-marketplace-n4tqj" Dec 06 06:38:52 crc kubenswrapper[4706]: I1206 06:38:52.335075 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8154a3c6-3bf3-404f-a12a-63640d4fea1d-utilities\") pod \"redhat-marketplace-n4tqj\" (UID: \"8154a3c6-3bf3-404f-a12a-63640d4fea1d\") " pod="openshift-marketplace/redhat-marketplace-n4tqj" Dec 06 06:38:52 crc kubenswrapper[4706]: I1206 06:38:52.335149 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbcvc\" (UniqueName: \"kubernetes.io/projected/8154a3c6-3bf3-404f-a12a-63640d4fea1d-kube-api-access-zbcvc\") pod \"redhat-marketplace-n4tqj\" (UID: \"8154a3c6-3bf3-404f-a12a-63640d4fea1d\") " pod="openshift-marketplace/redhat-marketplace-n4tqj" Dec 06 06:38:52 crc kubenswrapper[4706]: I1206 06:38:52.436524 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8154a3c6-3bf3-404f-a12a-63640d4fea1d-catalog-content\") pod \"redhat-marketplace-n4tqj\" (UID: \"8154a3c6-3bf3-404f-a12a-63640d4fea1d\") " pod="openshift-marketplace/redhat-marketplace-n4tqj" Dec 06 06:38:52 crc kubenswrapper[4706]: I1206 06:38:52.436586 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8154a3c6-3bf3-404f-a12a-63640d4fea1d-utilities\") pod \"redhat-marketplace-n4tqj\" (UID: \"8154a3c6-3bf3-404f-a12a-63640d4fea1d\") " pod="openshift-marketplace/redhat-marketplace-n4tqj" Dec 06 06:38:52 crc kubenswrapper[4706]: I1206 06:38:52.436628 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbcvc\" (UniqueName: \"kubernetes.io/projected/8154a3c6-3bf3-404f-a12a-63640d4fea1d-kube-api-access-zbcvc\") pod \"redhat-marketplace-n4tqj\" (UID: \"8154a3c6-3bf3-404f-a12a-63640d4fea1d\") " pod="openshift-marketplace/redhat-marketplace-n4tqj" Dec 06 06:38:52 crc kubenswrapper[4706]: I1206 06:38:52.437441 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8154a3c6-3bf3-404f-a12a-63640d4fea1d-utilities\") pod \"redhat-marketplace-n4tqj\" (UID: \"8154a3c6-3bf3-404f-a12a-63640d4fea1d\") " pod="openshift-marketplace/redhat-marketplace-n4tqj" Dec 06 06:38:52 crc kubenswrapper[4706]: I1206 06:38:52.437587 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8154a3c6-3bf3-404f-a12a-63640d4fea1d-catalog-content\") pod \"redhat-marketplace-n4tqj\" (UID: \"8154a3c6-3bf3-404f-a12a-63640d4fea1d\") " pod="openshift-marketplace/redhat-marketplace-n4tqj" Dec 06 06:38:52 crc kubenswrapper[4706]: I1206 06:38:52.456514 4706 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-zbcvc\" (UniqueName: \"kubernetes.io/projected/8154a3c6-3bf3-404f-a12a-63640d4fea1d-kube-api-access-zbcvc\") pod \"redhat-marketplace-n4tqj\" (UID: \"8154a3c6-3bf3-404f-a12a-63640d4fea1d\") " pod="openshift-marketplace/redhat-marketplace-n4tqj" Dec 06 06:38:52 crc kubenswrapper[4706]: I1206 06:38:52.479770 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n4tqj" Dec 06 06:38:53 crc kubenswrapper[4706]: I1206 06:38:53.299490 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n4tqj"] Dec 06 06:38:53 crc kubenswrapper[4706]: I1206 06:38:53.531411 4706 generic.go:334] "Generic (PLEG): container finished" podID="8154a3c6-3bf3-404f-a12a-63640d4fea1d" containerID="63834f9b1b5bbf958ba67186c19232391332739eb47f776b58aa818987a6cddd" exitCode=0 Dec 06 06:38:53 crc kubenswrapper[4706]: I1206 06:38:53.531484 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n4tqj" event={"ID":"8154a3c6-3bf3-404f-a12a-63640d4fea1d","Type":"ContainerDied","Data":"63834f9b1b5bbf958ba67186c19232391332739eb47f776b58aa818987a6cddd"} Dec 06 06:38:53 crc kubenswrapper[4706]: I1206 06:38:53.531744 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n4tqj" event={"ID":"8154a3c6-3bf3-404f-a12a-63640d4fea1d","Type":"ContainerStarted","Data":"2aa1c8888900f9f29163dd6f27d46e5541c476fbcf77d3e1ffa825b0e9016a40"} Dec 06 06:38:53 crc kubenswrapper[4706]: I1206 06:38:53.533419 4706 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 06 06:38:54 crc kubenswrapper[4706]: I1206 06:38:54.541396 4706 generic.go:334] "Generic (PLEG): container finished" podID="8154a3c6-3bf3-404f-a12a-63640d4fea1d" containerID="f1281a8379caaad006ce56dfb4bf7ab5b279a7c7366d6b7d4cd9176c53897e16" exitCode=0 Dec 06 06:38:54 crc kubenswrapper[4706]: I1206 06:38:54.541568 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n4tqj" event={"ID":"8154a3c6-3bf3-404f-a12a-63640d4fea1d","Type":"ContainerDied","Data":"f1281a8379caaad006ce56dfb4bf7ab5b279a7c7366d6b7d4cd9176c53897e16"} Dec 06 06:38:55 crc kubenswrapper[4706]: I1206 06:38:55.551663 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n4tqj" event={"ID":"8154a3c6-3bf3-404f-a12a-63640d4fea1d","Type":"ContainerStarted","Data":"d2c3e29e81b586203c779563c44aa6941a0bb85e80b11f51e7d00c48be613e44"} Dec 06 06:38:55 crc kubenswrapper[4706]: I1206 06:38:55.574843 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-n4tqj" podStartSLOduration=2.092514327 podStartE2EDuration="3.574826902s" podCreationTimestamp="2025-12-06 06:38:52 +0000 UTC" firstStartedPulling="2025-12-06 06:38:53.533199031 +0000 UTC m=+4755.861022975" lastFinishedPulling="2025-12-06 06:38:55.015511606 +0000 UTC m=+4757.343335550" observedRunningTime="2025-12-06 06:38:55.571928583 +0000 UTC m=+4757.899752527" watchObservedRunningTime="2025-12-06 06:38:55.574826902 +0000 UTC m=+4757.902650846" Dec 06 06:38:59 crc kubenswrapper[4706]: I1206 06:38:59.259952 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-pf8gt_44e622ec-7780-489c-bcf0-575ec84dc213/cert-manager-controller/0.log" Dec 06 06:38:59 crc kubenswrapper[4706]: I1206 06:38:59.382464 4706 
log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-fbpqk_58b45d75-86f1-4092-89ba-a1f924030512/cert-manager-cainjector/0.log" Dec 06 06:38:59 crc kubenswrapper[4706]: I1206 06:38:59.484856 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-22w82_052717cc-1d2a-4e9a-a6a3-897c1d529b1e/cert-manager-webhook/0.log" Dec 06 06:39:02 crc kubenswrapper[4706]: I1206 06:39:02.480545 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-n4tqj" Dec 06 06:39:02 crc kubenswrapper[4706]: I1206 06:39:02.481002 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-n4tqj" Dec 06 06:39:02 crc kubenswrapper[4706]: I1206 06:39:02.942373 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-n4tqj" Dec 06 06:39:03 crc kubenswrapper[4706]: I1206 06:39:03.008983 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-n4tqj" Dec 06 06:39:03 crc kubenswrapper[4706]: I1206 06:39:03.178825 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n4tqj"] Dec 06 06:39:04 crc kubenswrapper[4706]: I1206 06:39:04.036539 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:39:04 crc kubenswrapper[4706]: E1206 06:39:04.037028 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:39:04 crc kubenswrapper[4706]: I1206 06:39:04.650172 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-n4tqj" podUID="8154a3c6-3bf3-404f-a12a-63640d4fea1d" containerName="registry-server" containerID="cri-o://d2c3e29e81b586203c779563c44aa6941a0bb85e80b11f51e7d00c48be613e44" gracePeriod=2 Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.101148 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n4tqj" Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.184495 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8154a3c6-3bf3-404f-a12a-63640d4fea1d-utilities\") pod \"8154a3c6-3bf3-404f-a12a-63640d4fea1d\" (UID: \"8154a3c6-3bf3-404f-a12a-63640d4fea1d\") " Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.184642 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8154a3c6-3bf3-404f-a12a-63640d4fea1d-catalog-content\") pod \"8154a3c6-3bf3-404f-a12a-63640d4fea1d\" (UID: \"8154a3c6-3bf3-404f-a12a-63640d4fea1d\") " Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.184689 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zbcvc\" (UniqueName: \"kubernetes.io/projected/8154a3c6-3bf3-404f-a12a-63640d4fea1d-kube-api-access-zbcvc\") pod \"8154a3c6-3bf3-404f-a12a-63640d4fea1d\" (UID: \"8154a3c6-3bf3-404f-a12a-63640d4fea1d\") " Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.185464 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8154a3c6-3bf3-404f-a12a-63640d4fea1d-utilities" (OuterVolumeSpecName: "utilities") pod "8154a3c6-3bf3-404f-a12a-63640d4fea1d" (UID: "8154a3c6-3bf3-404f-a12a-63640d4fea1d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.202452 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8154a3c6-3bf3-404f-a12a-63640d4fea1d-kube-api-access-zbcvc" (OuterVolumeSpecName: "kube-api-access-zbcvc") pod "8154a3c6-3bf3-404f-a12a-63640d4fea1d" (UID: "8154a3c6-3bf3-404f-a12a-63640d4fea1d"). InnerVolumeSpecName "kube-api-access-zbcvc". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.208480 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8154a3c6-3bf3-404f-a12a-63640d4fea1d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8154a3c6-3bf3-404f-a12a-63640d4fea1d" (UID: "8154a3c6-3bf3-404f-a12a-63640d4fea1d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.286071 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8154a3c6-3bf3-404f-a12a-63640d4fea1d-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.286321 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zbcvc\" (UniqueName: \"kubernetes.io/projected/8154a3c6-3bf3-404f-a12a-63640d4fea1d-kube-api-access-zbcvc\") on node \"crc\" DevicePath \"\"" Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.286330 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8154a3c6-3bf3-404f-a12a-63640d4fea1d-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.661403 4706 generic.go:334] "Generic (PLEG): container finished" podID="8154a3c6-3bf3-404f-a12a-63640d4fea1d" containerID="d2c3e29e81b586203c779563c44aa6941a0bb85e80b11f51e7d00c48be613e44" exitCode=0 Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.661447 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n4tqj" event={"ID":"8154a3c6-3bf3-404f-a12a-63640d4fea1d","Type":"ContainerDied","Data":"d2c3e29e81b586203c779563c44aa6941a0bb85e80b11f51e7d00c48be613e44"} Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.661455 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n4tqj" Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.661474 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n4tqj" event={"ID":"8154a3c6-3bf3-404f-a12a-63640d4fea1d","Type":"ContainerDied","Data":"2aa1c8888900f9f29163dd6f27d46e5541c476fbcf77d3e1ffa825b0e9016a40"} Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.661495 4706 scope.go:117] "RemoveContainer" containerID="d2c3e29e81b586203c779563c44aa6941a0bb85e80b11f51e7d00c48be613e44" Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.682614 4706 scope.go:117] "RemoveContainer" containerID="f1281a8379caaad006ce56dfb4bf7ab5b279a7c7366d6b7d4cd9176c53897e16" Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.705540 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n4tqj"] Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.713218 4706 scope.go:117] "RemoveContainer" containerID="63834f9b1b5bbf958ba67186c19232391332739eb47f776b58aa818987a6cddd" Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.725348 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-n4tqj"] Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.759522 4706 scope.go:117] "RemoveContainer" containerID="d2c3e29e81b586203c779563c44aa6941a0bb85e80b11f51e7d00c48be613e44" Dec 06 06:39:05 crc kubenswrapper[4706]: E1206 06:39:05.760000 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2c3e29e81b586203c779563c44aa6941a0bb85e80b11f51e7d00c48be613e44\": container with ID starting with d2c3e29e81b586203c779563c44aa6941a0bb85e80b11f51e7d00c48be613e44 not found: ID does not exist" containerID="d2c3e29e81b586203c779563c44aa6941a0bb85e80b11f51e7d00c48be613e44" Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.760071 4706 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2c3e29e81b586203c779563c44aa6941a0bb85e80b11f51e7d00c48be613e44"} err="failed to get container status \"d2c3e29e81b586203c779563c44aa6941a0bb85e80b11f51e7d00c48be613e44\": rpc error: code = NotFound desc = could not find container \"d2c3e29e81b586203c779563c44aa6941a0bb85e80b11f51e7d00c48be613e44\": container with ID starting with d2c3e29e81b586203c779563c44aa6941a0bb85e80b11f51e7d00c48be613e44 not found: ID does not exist" Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.760107 4706 scope.go:117] "RemoveContainer" containerID="f1281a8379caaad006ce56dfb4bf7ab5b279a7c7366d6b7d4cd9176c53897e16" Dec 06 06:39:05 crc kubenswrapper[4706]: E1206 06:39:05.760496 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f1281a8379caaad006ce56dfb4bf7ab5b279a7c7366d6b7d4cd9176c53897e16\": container with ID starting with f1281a8379caaad006ce56dfb4bf7ab5b279a7c7366d6b7d4cd9176c53897e16 not found: ID does not exist" containerID="f1281a8379caaad006ce56dfb4bf7ab5b279a7c7366d6b7d4cd9176c53897e16" Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.760526 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f1281a8379caaad006ce56dfb4bf7ab5b279a7c7366d6b7d4cd9176c53897e16"} err="failed to get container status \"f1281a8379caaad006ce56dfb4bf7ab5b279a7c7366d6b7d4cd9176c53897e16\": rpc error: code = NotFound desc = could not find container \"f1281a8379caaad006ce56dfb4bf7ab5b279a7c7366d6b7d4cd9176c53897e16\": container with ID starting with f1281a8379caaad006ce56dfb4bf7ab5b279a7c7366d6b7d4cd9176c53897e16 not found: ID does not exist" Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.760544 4706 scope.go:117] "RemoveContainer" containerID="63834f9b1b5bbf958ba67186c19232391332739eb47f776b58aa818987a6cddd" Dec 06 06:39:05 crc kubenswrapper[4706]: E1206 06:39:05.760819 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63834f9b1b5bbf958ba67186c19232391332739eb47f776b58aa818987a6cddd\": container with ID starting with 63834f9b1b5bbf958ba67186c19232391332739eb47f776b58aa818987a6cddd not found: ID does not exist" containerID="63834f9b1b5bbf958ba67186c19232391332739eb47f776b58aa818987a6cddd" Dec 06 06:39:05 crc kubenswrapper[4706]: I1206 06:39:05.760861 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63834f9b1b5bbf958ba67186c19232391332739eb47f776b58aa818987a6cddd"} err="failed to get container status \"63834f9b1b5bbf958ba67186c19232391332739eb47f776b58aa818987a6cddd\": rpc error: code = NotFound desc = could not find container \"63834f9b1b5bbf958ba67186c19232391332739eb47f776b58aa818987a6cddd\": container with ID starting with 63834f9b1b5bbf958ba67186c19232391332739eb47f776b58aa818987a6cddd not found: ID does not exist" Dec 06 06:39:06 crc kubenswrapper[4706]: I1206 06:39:06.047477 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8154a3c6-3bf3-404f-a12a-63640d4fea1d" path="/var/lib/kubelet/pods/8154a3c6-3bf3-404f-a12a-63640d4fea1d/volumes" Dec 06 06:39:12 crc kubenswrapper[4706]: I1206 06:39:12.682673 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7fbb5f6569-8c75x_1d9e9551-a46a-42b6-a9b4-b78a3994239a/nmstate-console-plugin/0.log" Dec 06 06:39:12 crc kubenswrapper[4706]: I1206 
06:39:12.849963 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-l4lvf_ab718c3d-1427-4fc0-b728-6925fca42caf/nmstate-handler/0.log" Dec 06 06:39:12 crc kubenswrapper[4706]: I1206 06:39:12.962433 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-5jtbm_eeea5f87-d6ea-47d3-86aa-4e5ed4562078/kube-rbac-proxy/0.log" Dec 06 06:39:12 crc kubenswrapper[4706]: I1206 06:39:12.976787 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-7f946cbc9-5jtbm_eeea5f87-d6ea-47d3-86aa-4e5ed4562078/nmstate-metrics/0.log" Dec 06 06:39:13 crc kubenswrapper[4706]: I1206 06:39:13.111098 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-5b5b58f5c8-xpljb_77c327d8-4531-43a9-991e-f913f7e1d02e/nmstate-operator/0.log" Dec 06 06:39:13 crc kubenswrapper[4706]: I1206 06:39:13.273966 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-5f6d4c5ccb-r55tp_aa5bcff8-fac7-4a00-b7f7-312f70ad11b2/nmstate-webhook/0.log" Dec 06 06:39:16 crc kubenswrapper[4706]: I1206 06:39:16.036819 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:39:16 crc kubenswrapper[4706]: E1206 06:39:16.037386 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:39:28 crc kubenswrapper[4706]: I1206 06:39:28.051619 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:39:28 crc kubenswrapper[4706]: E1206 06:39:28.052675 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:39:28 crc kubenswrapper[4706]: I1206 06:39:28.474067 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-5gg6v_7009f978-2926-401b-bb27-4378dac2d69a/kube-rbac-proxy/0.log" Dec 06 06:39:28 crc kubenswrapper[4706]: I1206 06:39:28.647927 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-f8648f98b-5gg6v_7009f978-2926-401b-bb27-4378dac2d69a/controller/0.log" Dec 06 06:39:28 crc kubenswrapper[4706]: I1206 06:39:28.712940 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-frr-files/0.log" Dec 06 06:39:28 crc kubenswrapper[4706]: I1206 06:39:28.855396 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-frr-files/0.log" Dec 06 06:39:28 crc kubenswrapper[4706]: I1206 06:39:28.871386 4706 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-metrics/0.log" Dec 06 06:39:28 crc kubenswrapper[4706]: I1206 06:39:28.923238 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-reloader/0.log" Dec 06 06:39:28 crc kubenswrapper[4706]: I1206 06:39:28.944762 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-reloader/0.log" Dec 06 06:39:29 crc kubenswrapper[4706]: I1206 06:39:29.117587 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-reloader/0.log" Dec 06 06:39:29 crc kubenswrapper[4706]: I1206 06:39:29.120588 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-frr-files/0.log" Dec 06 06:39:29 crc kubenswrapper[4706]: I1206 06:39:29.161752 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-metrics/0.log" Dec 06 06:39:29 crc kubenswrapper[4706]: I1206 06:39:29.232834 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-metrics/0.log" Dec 06 06:39:29 crc kubenswrapper[4706]: I1206 06:39:29.402798 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-frr-files/0.log" Dec 06 06:39:29 crc kubenswrapper[4706]: I1206 06:39:29.414666 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-metrics/0.log" Dec 06 06:39:29 crc kubenswrapper[4706]: I1206 06:39:29.422669 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/cp-reloader/0.log" Dec 06 06:39:29 crc kubenswrapper[4706]: I1206 06:39:29.449764 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/controller/0.log" Dec 06 06:39:29 crc kubenswrapper[4706]: I1206 06:39:29.929488 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/kube-rbac-proxy/0.log" Dec 06 06:39:29 crc kubenswrapper[4706]: I1206 06:39:29.960834 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/kube-rbac-proxy-frr/0.log" Dec 06 06:39:30 crc kubenswrapper[4706]: I1206 06:39:30.071135 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/frr-metrics/0.log" Dec 06 06:39:30 crc kubenswrapper[4706]: I1206 06:39:30.151027 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/reloader/0.log" Dec 06 06:39:30 crc kubenswrapper[4706]: I1206 06:39:30.311636 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7fcb986d4-hwzgm_90735168-5b70-4282-9d00-6ca91facf758/frr-k8s-webhook-server/0.log" Dec 06 06:39:30 crc kubenswrapper[4706]: I1206 06:39:30.579023 4706 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_metallb-operator-controller-manager-67f666fcfb-5vg8w_86959832-935a-46cc-85bc-f0b9b39340a7/manager/0.log" Dec 06 06:39:30 crc kubenswrapper[4706]: I1206 06:39:30.872102 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-d44d656bf-lksks_2f5bd7cc-4de4-4ff3-8c7a-5aeb79fd9726/webhook-server/0.log" Dec 06 06:39:30 crc kubenswrapper[4706]: I1206 06:39:30.959615 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-snzn5_6d8b765c-bd65-44fb-a959-b458e0c531a4/kube-rbac-proxy/0.log" Dec 06 06:39:31 crc kubenswrapper[4706]: I1206 06:39:31.143940 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-84c87_b7f21e0e-b99e-4c3a-9a01-6016f5e3542f/frr/0.log" Dec 06 06:39:31 crc kubenswrapper[4706]: I1206 06:39:31.366728 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-snzn5_6d8b765c-bd65-44fb-a959-b458e0c531a4/speaker/0.log" Dec 06 06:39:39 crc kubenswrapper[4706]: I1206 06:39:39.036421 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:39:39 crc kubenswrapper[4706]: E1206 06:39:39.037279 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:39:44 crc kubenswrapper[4706]: I1206 06:39:44.234385 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm_04c31578-f89b-4b78-86fb-7809b9fa2a21/util/0.log" Dec 06 06:39:44 crc kubenswrapper[4706]: I1206 06:39:44.398856 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm_04c31578-f89b-4b78-86fb-7809b9fa2a21/pull/0.log" Dec 06 06:39:44 crc kubenswrapper[4706]: I1206 06:39:44.503006 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm_04c31578-f89b-4b78-86fb-7809b9fa2a21/pull/0.log" Dec 06 06:39:44 crc kubenswrapper[4706]: I1206 06:39:44.583400 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm_04c31578-f89b-4b78-86fb-7809b9fa2a21/util/0.log" Dec 06 06:39:44 crc kubenswrapper[4706]: I1206 06:39:44.854346 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm_04c31578-f89b-4b78-86fb-7809b9fa2a21/pull/0.log" Dec 06 06:39:44 crc kubenswrapper[4706]: I1206 06:39:44.860161 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm_04c31578-f89b-4b78-86fb-7809b9fa2a21/extract/0.log" Dec 06 06:39:44 crc kubenswrapper[4706]: I1206 06:39:44.879352 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5064f9f8917b246f69f5d7fc025e7e6c34236c02bca31167615d38212f2xzbm_04c31578-f89b-4b78-86fb-7809b9fa2a21/util/0.log" Dec 06 06:39:45 crc 
kubenswrapper[4706]: I1206 06:39:45.037432 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6_09b72ef4-066a-4aea-ad04-27d8bca291b8/util/0.log" Dec 06 06:39:45 crc kubenswrapper[4706]: I1206 06:39:45.215929 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6_09b72ef4-066a-4aea-ad04-27d8bca291b8/util/0.log" Dec 06 06:39:45 crc kubenswrapper[4706]: I1206 06:39:45.218287 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6_09b72ef4-066a-4aea-ad04-27d8bca291b8/pull/0.log" Dec 06 06:39:45 crc kubenswrapper[4706]: I1206 06:39:45.236868 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6_09b72ef4-066a-4aea-ad04-27d8bca291b8/pull/0.log" Dec 06 06:39:45 crc kubenswrapper[4706]: I1206 06:39:45.407298 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6_09b72ef4-066a-4aea-ad04-27d8bca291b8/extract/0.log" Dec 06 06:39:45 crc kubenswrapper[4706]: I1206 06:39:45.421895 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6_09b72ef4-066a-4aea-ad04-27d8bca291b8/util/0.log" Dec 06 06:39:45 crc kubenswrapper[4706]: I1206 06:39:45.422179 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_af69698b82fdf008f5ff9e195cf808a654240e16b13dcd924b74994f83ltxx6_09b72ef4-066a-4aea-ad04-27d8bca291b8/pull/0.log" Dec 06 06:39:45 crc kubenswrapper[4706]: I1206 06:39:45.564730 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lvmnq_a4c7cd15-784b-4201-b0e2-f463f15e9bf6/extract-utilities/0.log" Dec 06 06:39:45 crc kubenswrapper[4706]: I1206 06:39:45.735361 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lvmnq_a4c7cd15-784b-4201-b0e2-f463f15e9bf6/extract-content/0.log" Dec 06 06:39:45 crc kubenswrapper[4706]: I1206 06:39:45.758297 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lvmnq_a4c7cd15-784b-4201-b0e2-f463f15e9bf6/extract-utilities/0.log" Dec 06 06:39:45 crc kubenswrapper[4706]: I1206 06:39:45.776156 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lvmnq_a4c7cd15-784b-4201-b0e2-f463f15e9bf6/extract-content/0.log" Dec 06 06:39:45 crc kubenswrapper[4706]: I1206 06:39:45.981586 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lvmnq_a4c7cd15-784b-4201-b0e2-f463f15e9bf6/extract-utilities/0.log" Dec 06 06:39:46 crc kubenswrapper[4706]: I1206 06:39:46.060806 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lvmnq_a4c7cd15-784b-4201-b0e2-f463f15e9bf6/extract-content/0.log" Dec 06 06:39:46 crc kubenswrapper[4706]: I1206 06:39:46.220710 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5nx52_4bf72222-ee0b-41a2-877e-1bd5c83b392a/extract-utilities/0.log" Dec 06 06:39:46 crc kubenswrapper[4706]: I1206 06:39:46.552614 4706 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lvmnq_a4c7cd15-784b-4201-b0e2-f463f15e9bf6/registry-server/0.log" Dec 06 06:39:46 crc kubenswrapper[4706]: I1206 06:39:46.850756 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5nx52_4bf72222-ee0b-41a2-877e-1bd5c83b392a/extract-content/0.log" Dec 06 06:39:46 crc kubenswrapper[4706]: I1206 06:39:46.880990 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5nx52_4bf72222-ee0b-41a2-877e-1bd5c83b392a/extract-utilities/0.log" Dec 06 06:39:46 crc kubenswrapper[4706]: I1206 06:39:46.891358 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5nx52_4bf72222-ee0b-41a2-877e-1bd5c83b392a/extract-content/0.log" Dec 06 06:39:47 crc kubenswrapper[4706]: I1206 06:39:47.043555 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5nx52_4bf72222-ee0b-41a2-877e-1bd5c83b392a/extract-content/0.log" Dec 06 06:39:47 crc kubenswrapper[4706]: I1206 06:39:47.050161 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5nx52_4bf72222-ee0b-41a2-877e-1bd5c83b392a/extract-utilities/0.log" Dec 06 06:39:47 crc kubenswrapper[4706]: I1206 06:39:47.283379 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-9t9kd_03d87bf8-3c4d-4399-b7e9-dafa3bb98b4d/marketplace-operator/0.log" Dec 06 06:39:47 crc kubenswrapper[4706]: I1206 06:39:47.428460 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-6qchr_fa0b57c0-e802-4273-99c5-43e1c8fd1887/extract-utilities/0.log" Dec 06 06:39:47 crc kubenswrapper[4706]: I1206 06:39:47.469815 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-5nx52_4bf72222-ee0b-41a2-877e-1bd5c83b392a/registry-server/0.log" Dec 06 06:39:47 crc kubenswrapper[4706]: I1206 06:39:47.618711 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-6qchr_fa0b57c0-e802-4273-99c5-43e1c8fd1887/extract-content/0.log" Dec 06 06:39:47 crc kubenswrapper[4706]: I1206 06:39:47.637353 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-6qchr_fa0b57c0-e802-4273-99c5-43e1c8fd1887/extract-utilities/0.log" Dec 06 06:39:47 crc kubenswrapper[4706]: I1206 06:39:47.643644 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-6qchr_fa0b57c0-e802-4273-99c5-43e1c8fd1887/extract-content/0.log" Dec 06 06:39:47 crc kubenswrapper[4706]: I1206 06:39:47.871847 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-6qchr_fa0b57c0-e802-4273-99c5-43e1c8fd1887/extract-utilities/0.log" Dec 06 06:39:47 crc kubenswrapper[4706]: I1206 06:39:47.903834 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-6qchr_fa0b57c0-e802-4273-99c5-43e1c8fd1887/extract-content/0.log" Dec 06 06:39:48 crc kubenswrapper[4706]: I1206 06:39:48.114793 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-6qchr_fa0b57c0-e802-4273-99c5-43e1c8fd1887/registry-server/0.log" Dec 06 06:39:48 crc kubenswrapper[4706]: I1206 06:39:48.557565 4706 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-jmcdp_50601575-3e02-451a-97c3-24b24683e5b8/extract-utilities/0.log" Dec 06 06:39:48 crc kubenswrapper[4706]: I1206 06:39:48.731442 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-jmcdp_50601575-3e02-451a-97c3-24b24683e5b8/extract-utilities/0.log" Dec 06 06:39:48 crc kubenswrapper[4706]: I1206 06:39:48.742730 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-jmcdp_50601575-3e02-451a-97c3-24b24683e5b8/extract-content/0.log" Dec 06 06:39:48 crc kubenswrapper[4706]: I1206 06:39:48.753112 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-jmcdp_50601575-3e02-451a-97c3-24b24683e5b8/extract-content/0.log" Dec 06 06:39:48 crc kubenswrapper[4706]: I1206 06:39:48.914909 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-jmcdp_50601575-3e02-451a-97c3-24b24683e5b8/extract-utilities/0.log" Dec 06 06:39:49 crc kubenswrapper[4706]: I1206 06:39:49.130325 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-jmcdp_50601575-3e02-451a-97c3-24b24683e5b8/extract-content/0.log" Dec 06 06:39:49 crc kubenswrapper[4706]: I1206 06:39:49.588923 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-jmcdp_50601575-3e02-451a-97c3-24b24683e5b8/registry-server/0.log" Dec 06 06:39:52 crc kubenswrapper[4706]: I1206 06:39:52.035809 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:39:52 crc kubenswrapper[4706]: E1206 06:39:52.036326 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:40:05 crc kubenswrapper[4706]: I1206 06:40:05.036615 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:40:05 crc kubenswrapper[4706]: E1206 06:40:05.037518 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:40:08 crc kubenswrapper[4706]: I1206 06:40:08.389339 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-llkhh"] Dec 06 06:40:08 crc kubenswrapper[4706]: E1206 06:40:08.391023 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8154a3c6-3bf3-404f-a12a-63640d4fea1d" containerName="extract-content" Dec 06 06:40:08 crc kubenswrapper[4706]: I1206 06:40:08.391131 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="8154a3c6-3bf3-404f-a12a-63640d4fea1d" containerName="extract-content" Dec 06 06:40:08 crc kubenswrapper[4706]: E1206 06:40:08.391200 4706 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="8154a3c6-3bf3-404f-a12a-63640d4fea1d" containerName="registry-server" Dec 06 06:40:08 crc kubenswrapper[4706]: I1206 06:40:08.398156 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="8154a3c6-3bf3-404f-a12a-63640d4fea1d" containerName="registry-server" Dec 06 06:40:08 crc kubenswrapper[4706]: E1206 06:40:08.398399 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8154a3c6-3bf3-404f-a12a-63640d4fea1d" containerName="extract-utilities" Dec 06 06:40:08 crc kubenswrapper[4706]: I1206 06:40:08.398466 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="8154a3c6-3bf3-404f-a12a-63640d4fea1d" containerName="extract-utilities" Dec 06 06:40:08 crc kubenswrapper[4706]: I1206 06:40:08.398956 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="8154a3c6-3bf3-404f-a12a-63640d4fea1d" containerName="registry-server" Dec 06 06:40:08 crc kubenswrapper[4706]: I1206 06:40:08.400507 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-llkhh" Dec 06 06:40:08 crc kubenswrapper[4706]: I1206 06:40:08.404323 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-llkhh"] Dec 06 06:40:08 crc kubenswrapper[4706]: I1206 06:40:08.595848 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c88d3c32-5532-4f61-9bde-f52455f19d10-utilities\") pod \"community-operators-llkhh\" (UID: \"c88d3c32-5532-4f61-9bde-f52455f19d10\") " pod="openshift-marketplace/community-operators-llkhh" Dec 06 06:40:08 crc kubenswrapper[4706]: I1206 06:40:08.595987 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c88d3c32-5532-4f61-9bde-f52455f19d10-catalog-content\") pod \"community-operators-llkhh\" (UID: \"c88d3c32-5532-4f61-9bde-f52455f19d10\") " pod="openshift-marketplace/community-operators-llkhh" Dec 06 06:40:08 crc kubenswrapper[4706]: I1206 06:40:08.596079 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-snn44\" (UniqueName: \"kubernetes.io/projected/c88d3c32-5532-4f61-9bde-f52455f19d10-kube-api-access-snn44\") pod \"community-operators-llkhh\" (UID: \"c88d3c32-5532-4f61-9bde-f52455f19d10\") " pod="openshift-marketplace/community-operators-llkhh" Dec 06 06:40:08 crc kubenswrapper[4706]: I1206 06:40:08.702518 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c88d3c32-5532-4f61-9bde-f52455f19d10-catalog-content\") pod \"community-operators-llkhh\" (UID: \"c88d3c32-5532-4f61-9bde-f52455f19d10\") " pod="openshift-marketplace/community-operators-llkhh" Dec 06 06:40:08 crc kubenswrapper[4706]: I1206 06:40:08.702611 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-snn44\" (UniqueName: \"kubernetes.io/projected/c88d3c32-5532-4f61-9bde-f52455f19d10-kube-api-access-snn44\") pod \"community-operators-llkhh\" (UID: \"c88d3c32-5532-4f61-9bde-f52455f19d10\") " pod="openshift-marketplace/community-operators-llkhh" Dec 06 06:40:08 crc kubenswrapper[4706]: I1206 06:40:08.702659 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/c88d3c32-5532-4f61-9bde-f52455f19d10-utilities\") pod \"community-operators-llkhh\" (UID: \"c88d3c32-5532-4f61-9bde-f52455f19d10\") " pod="openshift-marketplace/community-operators-llkhh" Dec 06 06:40:08 crc kubenswrapper[4706]: I1206 06:40:08.703234 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c88d3c32-5532-4f61-9bde-f52455f19d10-utilities\") pod \"community-operators-llkhh\" (UID: \"c88d3c32-5532-4f61-9bde-f52455f19d10\") " pod="openshift-marketplace/community-operators-llkhh" Dec 06 06:40:08 crc kubenswrapper[4706]: I1206 06:40:08.703446 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c88d3c32-5532-4f61-9bde-f52455f19d10-catalog-content\") pod \"community-operators-llkhh\" (UID: \"c88d3c32-5532-4f61-9bde-f52455f19d10\") " pod="openshift-marketplace/community-operators-llkhh" Dec 06 06:40:09 crc kubenswrapper[4706]: I1206 06:40:09.223944 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-snn44\" (UniqueName: \"kubernetes.io/projected/c88d3c32-5532-4f61-9bde-f52455f19d10-kube-api-access-snn44\") pod \"community-operators-llkhh\" (UID: \"c88d3c32-5532-4f61-9bde-f52455f19d10\") " pod="openshift-marketplace/community-operators-llkhh" Dec 06 06:40:09 crc kubenswrapper[4706]: I1206 06:40:09.339989 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-llkhh" Dec 06 06:40:09 crc kubenswrapper[4706]: I1206 06:40:09.858012 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-llkhh"] Dec 06 06:40:10 crc kubenswrapper[4706]: I1206 06:40:10.287119 4706 generic.go:334] "Generic (PLEG): container finished" podID="c88d3c32-5532-4f61-9bde-f52455f19d10" containerID="07f1d62ecf65d8d8f50aecc085ad713d7e26fc30fbf83ce4362ae3f2afc5ef61" exitCode=0 Dec 06 06:40:10 crc kubenswrapper[4706]: I1206 06:40:10.287176 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llkhh" event={"ID":"c88d3c32-5532-4f61-9bde-f52455f19d10","Type":"ContainerDied","Data":"07f1d62ecf65d8d8f50aecc085ad713d7e26fc30fbf83ce4362ae3f2afc5ef61"} Dec 06 06:40:10 crc kubenswrapper[4706]: I1206 06:40:10.287479 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llkhh" event={"ID":"c88d3c32-5532-4f61-9bde-f52455f19d10","Type":"ContainerStarted","Data":"e2794a5a1ff7b18e608836e9e400d0ba334a92b93fb137e0d64b5f337be058d8"} Dec 06 06:40:11 crc kubenswrapper[4706]: I1206 06:40:11.297631 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llkhh" event={"ID":"c88d3c32-5532-4f61-9bde-f52455f19d10","Type":"ContainerStarted","Data":"f9f06b5908ee3885dd1ffa030c0fa6ef1c27c6c22c421c4180ad21b13445435e"} Dec 06 06:40:12 crc kubenswrapper[4706]: I1206 06:40:12.307608 4706 generic.go:334] "Generic (PLEG): container finished" podID="c88d3c32-5532-4f61-9bde-f52455f19d10" containerID="f9f06b5908ee3885dd1ffa030c0fa6ef1c27c6c22c421c4180ad21b13445435e" exitCode=0 Dec 06 06:40:12 crc kubenswrapper[4706]: I1206 06:40:12.307701 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llkhh" event={"ID":"c88d3c32-5532-4f61-9bde-f52455f19d10","Type":"ContainerDied","Data":"f9f06b5908ee3885dd1ffa030c0fa6ef1c27c6c22c421c4180ad21b13445435e"} Dec 
06 06:40:13 crc kubenswrapper[4706]: I1206 06:40:13.319131 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llkhh" event={"ID":"c88d3c32-5532-4f61-9bde-f52455f19d10","Type":"ContainerStarted","Data":"f3708aeebf6a27da632eacbe05192e786a98f3086705e1a46c413d907e2c3acb"} Dec 06 06:40:13 crc kubenswrapper[4706]: I1206 06:40:13.350529 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-llkhh" podStartSLOduration=2.88735647 podStartE2EDuration="5.350507702s" podCreationTimestamp="2025-12-06 06:40:08 +0000 UTC" firstStartedPulling="2025-12-06 06:40:10.289518601 +0000 UTC m=+4832.617342545" lastFinishedPulling="2025-12-06 06:40:12.752669833 +0000 UTC m=+4835.080493777" observedRunningTime="2025-12-06 06:40:13.337098289 +0000 UTC m=+4835.664922263" watchObservedRunningTime="2025-12-06 06:40:13.350507702 +0000 UTC m=+4835.678331646" Dec 06 06:40:16 crc kubenswrapper[4706]: I1206 06:40:16.040545 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:40:16 crc kubenswrapper[4706]: E1206 06:40:16.041304 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:40:19 crc kubenswrapper[4706]: I1206 06:40:19.341156 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-llkhh" Dec 06 06:40:19 crc kubenswrapper[4706]: I1206 06:40:19.341525 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-llkhh" Dec 06 06:40:19 crc kubenswrapper[4706]: I1206 06:40:19.391687 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-llkhh" Dec 06 06:40:19 crc kubenswrapper[4706]: I1206 06:40:19.453743 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-llkhh" Dec 06 06:40:19 crc kubenswrapper[4706]: I1206 06:40:19.643097 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-llkhh"] Dec 06 06:40:21 crc kubenswrapper[4706]: I1206 06:40:21.403885 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-llkhh" podUID="c88d3c32-5532-4f61-9bde-f52455f19d10" containerName="registry-server" containerID="cri-o://f3708aeebf6a27da632eacbe05192e786a98f3086705e1a46c413d907e2c3acb" gracePeriod=2 Dec 06 06:40:22 crc kubenswrapper[4706]: I1206 06:40:22.416026 4706 generic.go:334] "Generic (PLEG): container finished" podID="c88d3c32-5532-4f61-9bde-f52455f19d10" containerID="f3708aeebf6a27da632eacbe05192e786a98f3086705e1a46c413d907e2c3acb" exitCode=0 Dec 06 06:40:22 crc kubenswrapper[4706]: I1206 06:40:22.416087 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llkhh" event={"ID":"c88d3c32-5532-4f61-9bde-f52455f19d10","Type":"ContainerDied","Data":"f3708aeebf6a27da632eacbe05192e786a98f3086705e1a46c413d907e2c3acb"} Dec 06 06:40:22 crc kubenswrapper[4706]: I1206 
06:40:22.416293 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-llkhh" event={"ID":"c88d3c32-5532-4f61-9bde-f52455f19d10","Type":"ContainerDied","Data":"e2794a5a1ff7b18e608836e9e400d0ba334a92b93fb137e0d64b5f337be058d8"} Dec 06 06:40:22 crc kubenswrapper[4706]: I1206 06:40:22.416325 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e2794a5a1ff7b18e608836e9e400d0ba334a92b93fb137e0d64b5f337be058d8" Dec 06 06:40:22 crc kubenswrapper[4706]: I1206 06:40:22.494668 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-llkhh" Dec 06 06:40:22 crc kubenswrapper[4706]: I1206 06:40:22.615441 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c88d3c32-5532-4f61-9bde-f52455f19d10-catalog-content\") pod \"c88d3c32-5532-4f61-9bde-f52455f19d10\" (UID: \"c88d3c32-5532-4f61-9bde-f52455f19d10\") " Dec 06 06:40:22 crc kubenswrapper[4706]: I1206 06:40:22.615553 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-snn44\" (UniqueName: \"kubernetes.io/projected/c88d3c32-5532-4f61-9bde-f52455f19d10-kube-api-access-snn44\") pod \"c88d3c32-5532-4f61-9bde-f52455f19d10\" (UID: \"c88d3c32-5532-4f61-9bde-f52455f19d10\") " Dec 06 06:40:22 crc kubenswrapper[4706]: I1206 06:40:22.615617 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c88d3c32-5532-4f61-9bde-f52455f19d10-utilities\") pod \"c88d3c32-5532-4f61-9bde-f52455f19d10\" (UID: \"c88d3c32-5532-4f61-9bde-f52455f19d10\") " Dec 06 06:40:22 crc kubenswrapper[4706]: I1206 06:40:22.617195 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c88d3c32-5532-4f61-9bde-f52455f19d10-utilities" (OuterVolumeSpecName: "utilities") pod "c88d3c32-5532-4f61-9bde-f52455f19d10" (UID: "c88d3c32-5532-4f61-9bde-f52455f19d10"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:40:22 crc kubenswrapper[4706]: I1206 06:40:22.622940 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c88d3c32-5532-4f61-9bde-f52455f19d10-kube-api-access-snn44" (OuterVolumeSpecName: "kube-api-access-snn44") pod "c88d3c32-5532-4f61-9bde-f52455f19d10" (UID: "c88d3c32-5532-4f61-9bde-f52455f19d10"). InnerVolumeSpecName "kube-api-access-snn44". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:40:22 crc kubenswrapper[4706]: I1206 06:40:22.667557 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c88d3c32-5532-4f61-9bde-f52455f19d10-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c88d3c32-5532-4f61-9bde-f52455f19d10" (UID: "c88d3c32-5532-4f61-9bde-f52455f19d10"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:40:22 crc kubenswrapper[4706]: I1206 06:40:22.717923 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c88d3c32-5532-4f61-9bde-f52455f19d10-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 06:40:22 crc kubenswrapper[4706]: I1206 06:40:22.717952 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c88d3c32-5532-4f61-9bde-f52455f19d10-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 06:40:22 crc kubenswrapper[4706]: I1206 06:40:22.717982 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-snn44\" (UniqueName: \"kubernetes.io/projected/c88d3c32-5532-4f61-9bde-f52455f19d10-kube-api-access-snn44\") on node \"crc\" DevicePath \"\"" Dec 06 06:40:23 crc kubenswrapper[4706]: I1206 06:40:23.423474 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-llkhh" Dec 06 06:40:23 crc kubenswrapper[4706]: I1206 06:40:23.454097 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-llkhh"] Dec 06 06:40:23 crc kubenswrapper[4706]: I1206 06:40:23.461911 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-llkhh"] Dec 06 06:40:24 crc kubenswrapper[4706]: I1206 06:40:24.060828 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c88d3c32-5532-4f61-9bde-f52455f19d10" path="/var/lib/kubelet/pods/c88d3c32-5532-4f61-9bde-f52455f19d10/volumes" Dec 06 06:40:28 crc kubenswrapper[4706]: I1206 06:40:28.045233 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:40:28 crc kubenswrapper[4706]: E1206 06:40:28.046029 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:40:41 crc kubenswrapper[4706]: I1206 06:40:41.039202 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:40:41 crc kubenswrapper[4706]: E1206 06:40:41.039965 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:40:56 crc kubenswrapper[4706]: I1206 06:40:56.036188 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:40:56 crc kubenswrapper[4706]: E1206 06:40:56.037037 4706 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-z27rn_openshift-machine-config-operator(ae6d3c62-ad40-492b-9c35-d0043649cb81)\"" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" Dec 06 06:41:07 crc kubenswrapper[4706]: I1206 06:41:07.036175 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:41:07 crc kubenswrapper[4706]: I1206 06:41:07.811533 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"b271083abe8b661a20301434cb5b966d7e13b322b9ea83601ca82a85bd007309"} Dec 06 06:41:37 crc kubenswrapper[4706]: I1206 06:41:37.083129 4706 generic.go:334] "Generic (PLEG): container finished" podID="b43aae1d-2a9a-44de-a8cf-c18c9660199a" containerID="c9b268c81c0b2a440404ae7fbd529a91cb43cd4b27b73f39149e6b798f5b022d" exitCode=0 Dec 06 06:41:37 crc kubenswrapper[4706]: I1206 06:41:37.083192 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-t68mk/must-gather-gxctj" event={"ID":"b43aae1d-2a9a-44de-a8cf-c18c9660199a","Type":"ContainerDied","Data":"c9b268c81c0b2a440404ae7fbd529a91cb43cd4b27b73f39149e6b798f5b022d"} Dec 06 06:41:37 crc kubenswrapper[4706]: I1206 06:41:37.084286 4706 scope.go:117] "RemoveContainer" containerID="c9b268c81c0b2a440404ae7fbd529a91cb43cd4b27b73f39149e6b798f5b022d" Dec 06 06:41:37 crc kubenswrapper[4706]: I1206 06:41:37.870241 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-t68mk_must-gather-gxctj_b43aae1d-2a9a-44de-a8cf-c18c9660199a/gather/0.log" Dec 06 06:41:49 crc kubenswrapper[4706]: I1206 06:41:49.386862 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-t68mk/must-gather-gxctj"] Dec 06 06:41:49 crc kubenswrapper[4706]: I1206 06:41:49.387682 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-t68mk/must-gather-gxctj" podUID="b43aae1d-2a9a-44de-a8cf-c18c9660199a" containerName="copy" containerID="cri-o://96573c9c398a2c4facac229c941bd87216f4808273c7853d13db00c40d346e1d" gracePeriod=2 Dec 06 06:41:49 crc kubenswrapper[4706]: I1206 06:41:49.403395 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-t68mk/must-gather-gxctj"] Dec 06 06:41:51 crc kubenswrapper[4706]: I1206 06:41:51.207833 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-t68mk_must-gather-gxctj_b43aae1d-2a9a-44de-a8cf-c18c9660199a/copy/0.log" Dec 06 06:41:51 crc kubenswrapper[4706]: I1206 06:41:51.209529 4706 generic.go:334] "Generic (PLEG): container finished" podID="b43aae1d-2a9a-44de-a8cf-c18c9660199a" containerID="96573c9c398a2c4facac229c941bd87216f4808273c7853d13db00c40d346e1d" exitCode=143 Dec 06 06:41:51 crc kubenswrapper[4706]: I1206 06:41:51.534556 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-t68mk_must-gather-gxctj_b43aae1d-2a9a-44de-a8cf-c18c9660199a/copy/0.log" Dec 06 06:41:51 crc kubenswrapper[4706]: I1206 06:41:51.535127 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-t68mk/must-gather-gxctj" Dec 06 06:41:51 crc kubenswrapper[4706]: I1206 06:41:51.561733 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b43aae1d-2a9a-44de-a8cf-c18c9660199a-must-gather-output\") pod \"b43aae1d-2a9a-44de-a8cf-c18c9660199a\" (UID: \"b43aae1d-2a9a-44de-a8cf-c18c9660199a\") " Dec 06 06:41:51 crc kubenswrapper[4706]: I1206 06:41:51.562186 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dj4wm\" (UniqueName: \"kubernetes.io/projected/b43aae1d-2a9a-44de-a8cf-c18c9660199a-kube-api-access-dj4wm\") pod \"b43aae1d-2a9a-44de-a8cf-c18c9660199a\" (UID: \"b43aae1d-2a9a-44de-a8cf-c18c9660199a\") " Dec 06 06:41:51 crc kubenswrapper[4706]: I1206 06:41:51.571123 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b43aae1d-2a9a-44de-a8cf-c18c9660199a-kube-api-access-dj4wm" (OuterVolumeSpecName: "kube-api-access-dj4wm") pod "b43aae1d-2a9a-44de-a8cf-c18c9660199a" (UID: "b43aae1d-2a9a-44de-a8cf-c18c9660199a"). InnerVolumeSpecName "kube-api-access-dj4wm". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:41:51 crc kubenswrapper[4706]: I1206 06:41:51.663822 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dj4wm\" (UniqueName: \"kubernetes.io/projected/b43aae1d-2a9a-44de-a8cf-c18c9660199a-kube-api-access-dj4wm\") on node \"crc\" DevicePath \"\"" Dec 06 06:41:51 crc kubenswrapper[4706]: I1206 06:41:51.710311 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b43aae1d-2a9a-44de-a8cf-c18c9660199a-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "b43aae1d-2a9a-44de-a8cf-c18c9660199a" (UID: "b43aae1d-2a9a-44de-a8cf-c18c9660199a"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:41:51 crc kubenswrapper[4706]: I1206 06:41:51.766158 4706 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/b43aae1d-2a9a-44de-a8cf-c18c9660199a-must-gather-output\") on node \"crc\" DevicePath \"\"" Dec 06 06:41:52 crc kubenswrapper[4706]: I1206 06:41:52.047203 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b43aae1d-2a9a-44de-a8cf-c18c9660199a" path="/var/lib/kubelet/pods/b43aae1d-2a9a-44de-a8cf-c18c9660199a/volumes" Dec 06 06:41:52 crc kubenswrapper[4706]: I1206 06:41:52.219835 4706 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-t68mk_must-gather-gxctj_b43aae1d-2a9a-44de-a8cf-c18c9660199a/copy/0.log" Dec 06 06:41:52 crc kubenswrapper[4706]: I1206 06:41:52.220261 4706 scope.go:117] "RemoveContainer" containerID="96573c9c398a2c4facac229c941bd87216f4808273c7853d13db00c40d346e1d" Dec 06 06:41:52 crc kubenswrapper[4706]: I1206 06:41:52.220304 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-t68mk/must-gather-gxctj" Dec 06 06:41:52 crc kubenswrapper[4706]: I1206 06:41:52.244078 4706 scope.go:117] "RemoveContainer" containerID="c9b268c81c0b2a440404ae7fbd529a91cb43cd4b27b73f39149e6b798f5b022d" Dec 06 06:43:13 crc kubenswrapper[4706]: I1206 06:43:13.141875 4706 scope.go:117] "RemoveContainer" containerID="b9570aa19c6d3c854b0d71d08d1354b81edb3434927cb46927d11639dfa0b1e6" Dec 06 06:43:35 crc kubenswrapper[4706]: I1206 06:43:35.962127 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 06:43:35 crc kubenswrapper[4706]: I1206 06:43:35.962921 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 06:44:05 crc kubenswrapper[4706]: I1206 06:44:05.961080 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 06:44:05 crc kubenswrapper[4706]: I1206 06:44:05.962108 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 06:44:13 crc kubenswrapper[4706]: I1206 06:44:13.215682 4706 scope.go:117] "RemoveContainer" containerID="75eacb46e68a6e88f4acd02fd9296892c5dc4ace5e7427fed0cddd83d66042b5" Dec 06 06:44:35 crc kubenswrapper[4706]: I1206 06:44:35.961827 4706 patch_prober.go:28] interesting pod/machine-config-daemon-z27rn container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Dec 06 06:44:35 crc kubenswrapper[4706]: I1206 06:44:35.962271 4706 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Dec 06 06:44:35 crc kubenswrapper[4706]: I1206 06:44:35.962320 4706 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" Dec 06 06:44:35 crc kubenswrapper[4706]: I1206 06:44:35.962996 4706 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b271083abe8b661a20301434cb5b966d7e13b322b9ea83601ca82a85bd007309"} pod="openshift-machine-config-operator/machine-config-daemon-z27rn" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Dec 06 06:44:35 crc kubenswrapper[4706]: I1206 
06:44:35.963064 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" podUID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerName="machine-config-daemon" containerID="cri-o://b271083abe8b661a20301434cb5b966d7e13b322b9ea83601ca82a85bd007309" gracePeriod=600 Dec 06 06:44:36 crc kubenswrapper[4706]: I1206 06:44:36.758200 4706 generic.go:334] "Generic (PLEG): container finished" podID="ae6d3c62-ad40-492b-9c35-d0043649cb81" containerID="b271083abe8b661a20301434cb5b966d7e13b322b9ea83601ca82a85bd007309" exitCode=0 Dec 06 06:44:36 crc kubenswrapper[4706]: I1206 06:44:36.758258 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerDied","Data":"b271083abe8b661a20301434cb5b966d7e13b322b9ea83601ca82a85bd007309"} Dec 06 06:44:36 crc kubenswrapper[4706]: I1206 06:44:36.758538 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-z27rn" event={"ID":"ae6d3c62-ad40-492b-9c35-d0043649cb81","Type":"ContainerStarted","Data":"094bfa841b6f6ec536d8ecb0cd8d0d692b68d581f79d659bdd537cbaacdc7aa0"} Dec 06 06:44:36 crc kubenswrapper[4706]: I1206 06:44:36.758561 4706 scope.go:117] "RemoveContainer" containerID="18a7862fdac23c338571a4792bdf4d38b8fe272c5185714822e9e65ab24ca0ff" Dec 06 06:44:37 crc kubenswrapper[4706]: I1206 06:44:37.821417 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-stshk"] Dec 06 06:44:37 crc kubenswrapper[4706]: E1206 06:44:37.822248 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c88d3c32-5532-4f61-9bde-f52455f19d10" containerName="extract-utilities" Dec 06 06:44:37 crc kubenswrapper[4706]: I1206 06:44:37.822263 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="c88d3c32-5532-4f61-9bde-f52455f19d10" containerName="extract-utilities" Dec 06 06:44:37 crc kubenswrapper[4706]: E1206 06:44:37.822271 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b43aae1d-2a9a-44de-a8cf-c18c9660199a" containerName="gather" Dec 06 06:44:37 crc kubenswrapper[4706]: I1206 06:44:37.822278 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="b43aae1d-2a9a-44de-a8cf-c18c9660199a" containerName="gather" Dec 06 06:44:37 crc kubenswrapper[4706]: E1206 06:44:37.822288 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c88d3c32-5532-4f61-9bde-f52455f19d10" containerName="registry-server" Dec 06 06:44:37 crc kubenswrapper[4706]: I1206 06:44:37.822295 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="c88d3c32-5532-4f61-9bde-f52455f19d10" containerName="registry-server" Dec 06 06:44:37 crc kubenswrapper[4706]: E1206 06:44:37.822327 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b43aae1d-2a9a-44de-a8cf-c18c9660199a" containerName="copy" Dec 06 06:44:37 crc kubenswrapper[4706]: I1206 06:44:37.822334 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="b43aae1d-2a9a-44de-a8cf-c18c9660199a" containerName="copy" Dec 06 06:44:37 crc kubenswrapper[4706]: E1206 06:44:37.822343 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c88d3c32-5532-4f61-9bde-f52455f19d10" containerName="extract-content" Dec 06 06:44:37 crc kubenswrapper[4706]: I1206 06:44:37.822351 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="c88d3c32-5532-4f61-9bde-f52455f19d10" 
containerName="extract-content" Dec 06 06:44:37 crc kubenswrapper[4706]: I1206 06:44:37.822559 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="b43aae1d-2a9a-44de-a8cf-c18c9660199a" containerName="copy" Dec 06 06:44:37 crc kubenswrapper[4706]: I1206 06:44:37.822585 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="c88d3c32-5532-4f61-9bde-f52455f19d10" containerName="registry-server" Dec 06 06:44:37 crc kubenswrapper[4706]: I1206 06:44:37.822609 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="b43aae1d-2a9a-44de-a8cf-c18c9660199a" containerName="gather" Dec 06 06:44:37 crc kubenswrapper[4706]: I1206 06:44:37.828583 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-stshk" Dec 06 06:44:37 crc kubenswrapper[4706]: I1206 06:44:37.840996 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-stshk"] Dec 06 06:44:37 crc kubenswrapper[4706]: I1206 06:44:37.927019 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3a9ece9-3ffa-4da0-b14e-3a5206434ab7-catalog-content\") pod \"redhat-operators-stshk\" (UID: \"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7\") " pod="openshift-marketplace/redhat-operators-stshk" Dec 06 06:44:37 crc kubenswrapper[4706]: I1206 06:44:37.927095 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3a9ece9-3ffa-4da0-b14e-3a5206434ab7-utilities\") pod \"redhat-operators-stshk\" (UID: \"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7\") " pod="openshift-marketplace/redhat-operators-stshk" Dec 06 06:44:37 crc kubenswrapper[4706]: I1206 06:44:37.927154 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcsxb\" (UniqueName: \"kubernetes.io/projected/e3a9ece9-3ffa-4da0-b14e-3a5206434ab7-kube-api-access-gcsxb\") pod \"redhat-operators-stshk\" (UID: \"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7\") " pod="openshift-marketplace/redhat-operators-stshk" Dec 06 06:44:38 crc kubenswrapper[4706]: I1206 06:44:38.028345 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3a9ece9-3ffa-4da0-b14e-3a5206434ab7-utilities\") pod \"redhat-operators-stshk\" (UID: \"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7\") " pod="openshift-marketplace/redhat-operators-stshk" Dec 06 06:44:38 crc kubenswrapper[4706]: I1206 06:44:38.028666 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcsxb\" (UniqueName: \"kubernetes.io/projected/e3a9ece9-3ffa-4da0-b14e-3a5206434ab7-kube-api-access-gcsxb\") pod \"redhat-operators-stshk\" (UID: \"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7\") " pod="openshift-marketplace/redhat-operators-stshk" Dec 06 06:44:38 crc kubenswrapper[4706]: I1206 06:44:38.028784 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3a9ece9-3ffa-4da0-b14e-3a5206434ab7-utilities\") pod \"redhat-operators-stshk\" (UID: \"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7\") " pod="openshift-marketplace/redhat-operators-stshk" Dec 06 06:44:38 crc kubenswrapper[4706]: I1206 06:44:38.028937 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/e3a9ece9-3ffa-4da0-b14e-3a5206434ab7-catalog-content\") pod \"redhat-operators-stshk\" (UID: \"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7\") " pod="openshift-marketplace/redhat-operators-stshk" Dec 06 06:44:38 crc kubenswrapper[4706]: I1206 06:44:38.029401 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3a9ece9-3ffa-4da0-b14e-3a5206434ab7-catalog-content\") pod \"redhat-operators-stshk\" (UID: \"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7\") " pod="openshift-marketplace/redhat-operators-stshk" Dec 06 06:44:38 crc kubenswrapper[4706]: I1206 06:44:38.048585 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gcsxb\" (UniqueName: \"kubernetes.io/projected/e3a9ece9-3ffa-4da0-b14e-3a5206434ab7-kube-api-access-gcsxb\") pod \"redhat-operators-stshk\" (UID: \"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7\") " pod="openshift-marketplace/redhat-operators-stshk" Dec 06 06:44:38 crc kubenswrapper[4706]: I1206 06:44:38.151968 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-stshk" Dec 06 06:44:38 crc kubenswrapper[4706]: I1206 06:44:38.727165 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-stshk"] Dec 06 06:44:38 crc kubenswrapper[4706]: I1206 06:44:38.789239 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-stshk" event={"ID":"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7","Type":"ContainerStarted","Data":"e141cc05e141a9a2ba652681dad19085f7db4917a2cc2e0e2cca2f98df22beaa"} Dec 06 06:44:39 crc kubenswrapper[4706]: I1206 06:44:39.811287 4706 generic.go:334] "Generic (PLEG): container finished" podID="e3a9ece9-3ffa-4da0-b14e-3a5206434ab7" containerID="cb4d2d9a6480b2db0f405a80b47bb004e80adbe38eb01cd23b7a4b402128f083" exitCode=0 Dec 06 06:44:39 crc kubenswrapper[4706]: I1206 06:44:39.811434 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-stshk" event={"ID":"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7","Type":"ContainerDied","Data":"cb4d2d9a6480b2db0f405a80b47bb004e80adbe38eb01cd23b7a4b402128f083"} Dec 06 06:44:39 crc kubenswrapper[4706]: I1206 06:44:39.815630 4706 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Dec 06 06:44:40 crc kubenswrapper[4706]: I1206 06:44:40.824353 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-stshk" event={"ID":"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7","Type":"ContainerStarted","Data":"ded628f5b8aa349fee63ba2cdb4d341d6d5cfc9fab6bc5610793c141369cdfea"} Dec 06 06:44:41 crc kubenswrapper[4706]: I1206 06:44:41.838749 4706 generic.go:334] "Generic (PLEG): container finished" podID="e3a9ece9-3ffa-4da0-b14e-3a5206434ab7" containerID="ded628f5b8aa349fee63ba2cdb4d341d6d5cfc9fab6bc5610793c141369cdfea" exitCode=0 Dec 06 06:44:41 crc kubenswrapper[4706]: I1206 06:44:41.838851 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-stshk" event={"ID":"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7","Type":"ContainerDied","Data":"ded628f5b8aa349fee63ba2cdb4d341d6d5cfc9fab6bc5610793c141369cdfea"} Dec 06 06:44:43 crc kubenswrapper[4706]: I1206 06:44:43.857684 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-stshk" 
event={"ID":"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7","Type":"ContainerStarted","Data":"082ad6d9291990cac9b146151e3fb67b42c516008d0e27be2a727f9b7d6fc2d5"} Dec 06 06:44:43 crc kubenswrapper[4706]: I1206 06:44:43.882096 4706 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-stshk" podStartSLOduration=3.995060285 podStartE2EDuration="6.882075222s" podCreationTimestamp="2025-12-06 06:44:37 +0000 UTC" firstStartedPulling="2025-12-06 06:44:39.815369374 +0000 UTC m=+5102.143193318" lastFinishedPulling="2025-12-06 06:44:42.702384321 +0000 UTC m=+5105.030208255" observedRunningTime="2025-12-06 06:44:43.873655163 +0000 UTC m=+5106.201479107" watchObservedRunningTime="2025-12-06 06:44:43.882075222 +0000 UTC m=+5106.209899186" Dec 06 06:44:48 crc kubenswrapper[4706]: I1206 06:44:48.153193 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-stshk" Dec 06 06:44:48 crc kubenswrapper[4706]: I1206 06:44:48.154558 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-stshk" Dec 06 06:44:48 crc kubenswrapper[4706]: I1206 06:44:48.209540 4706 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-stshk" Dec 06 06:44:48 crc kubenswrapper[4706]: I1206 06:44:48.942917 4706 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-stshk" Dec 06 06:44:48 crc kubenswrapper[4706]: I1206 06:44:48.997002 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-stshk"] Dec 06 06:44:50 crc kubenswrapper[4706]: I1206 06:44:50.912266 4706 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-stshk" podUID="e3a9ece9-3ffa-4da0-b14e-3a5206434ab7" containerName="registry-server" containerID="cri-o://082ad6d9291990cac9b146151e3fb67b42c516008d0e27be2a727f9b7d6fc2d5" gracePeriod=2 Dec 06 06:44:51 crc kubenswrapper[4706]: I1206 06:44:51.403028 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-stshk" Dec 06 06:44:51 crc kubenswrapper[4706]: I1206 06:44:51.506577 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gcsxb\" (UniqueName: \"kubernetes.io/projected/e3a9ece9-3ffa-4da0-b14e-3a5206434ab7-kube-api-access-gcsxb\") pod \"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7\" (UID: \"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7\") " Dec 06 06:44:51 crc kubenswrapper[4706]: I1206 06:44:51.506744 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3a9ece9-3ffa-4da0-b14e-3a5206434ab7-utilities\") pod \"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7\" (UID: \"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7\") " Dec 06 06:44:51 crc kubenswrapper[4706]: I1206 06:44:51.506871 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3a9ece9-3ffa-4da0-b14e-3a5206434ab7-catalog-content\") pod \"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7\" (UID: \"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7\") " Dec 06 06:44:51 crc kubenswrapper[4706]: I1206 06:44:51.514423 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3a9ece9-3ffa-4da0-b14e-3a5206434ab7-kube-api-access-gcsxb" (OuterVolumeSpecName: "kube-api-access-gcsxb") pod "e3a9ece9-3ffa-4da0-b14e-3a5206434ab7" (UID: "e3a9ece9-3ffa-4da0-b14e-3a5206434ab7"). InnerVolumeSpecName "kube-api-access-gcsxb". PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:44:51 crc kubenswrapper[4706]: I1206 06:44:51.520650 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3a9ece9-3ffa-4da0-b14e-3a5206434ab7-utilities" (OuterVolumeSpecName: "utilities") pod "e3a9ece9-3ffa-4da0-b14e-3a5206434ab7" (UID: "e3a9ece9-3ffa-4da0-b14e-3a5206434ab7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:44:51 crc kubenswrapper[4706]: I1206 06:44:51.609071 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gcsxb\" (UniqueName: \"kubernetes.io/projected/e3a9ece9-3ffa-4da0-b14e-3a5206434ab7-kube-api-access-gcsxb\") on node \"crc\" DevicePath \"\"" Dec 06 06:44:51 crc kubenswrapper[4706]: I1206 06:44:51.609109 4706 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3a9ece9-3ffa-4da0-b14e-3a5206434ab7-utilities\") on node \"crc\" DevicePath \"\"" Dec 06 06:44:51 crc kubenswrapper[4706]: I1206 06:44:51.922820 4706 generic.go:334] "Generic (PLEG): container finished" podID="e3a9ece9-3ffa-4da0-b14e-3a5206434ab7" containerID="082ad6d9291990cac9b146151e3fb67b42c516008d0e27be2a727f9b7d6fc2d5" exitCode=0 Dec 06 06:44:51 crc kubenswrapper[4706]: I1206 06:44:51.922891 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-stshk" Dec 06 06:44:51 crc kubenswrapper[4706]: I1206 06:44:51.922891 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-stshk" event={"ID":"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7","Type":"ContainerDied","Data":"082ad6d9291990cac9b146151e3fb67b42c516008d0e27be2a727f9b7d6fc2d5"} Dec 06 06:44:51 crc kubenswrapper[4706]: I1206 06:44:51.922965 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-stshk" event={"ID":"e3a9ece9-3ffa-4da0-b14e-3a5206434ab7","Type":"ContainerDied","Data":"e141cc05e141a9a2ba652681dad19085f7db4917a2cc2e0e2cca2f98df22beaa"} Dec 06 06:44:51 crc kubenswrapper[4706]: I1206 06:44:51.922988 4706 scope.go:117] "RemoveContainer" containerID="082ad6d9291990cac9b146151e3fb67b42c516008d0e27be2a727f9b7d6fc2d5" Dec 06 06:44:51 crc kubenswrapper[4706]: I1206 06:44:51.953992 4706 scope.go:117] "RemoveContainer" containerID="ded628f5b8aa349fee63ba2cdb4d341d6d5cfc9fab6bc5610793c141369cdfea" Dec 06 06:44:51 crc kubenswrapper[4706]: I1206 06:44:51.981090 4706 scope.go:117] "RemoveContainer" containerID="cb4d2d9a6480b2db0f405a80b47bb004e80adbe38eb01cd23b7a4b402128f083" Dec 06 06:44:52 crc kubenswrapper[4706]: I1206 06:44:52.023537 4706 scope.go:117] "RemoveContainer" containerID="082ad6d9291990cac9b146151e3fb67b42c516008d0e27be2a727f9b7d6fc2d5" Dec 06 06:44:52 crc kubenswrapper[4706]: E1206 06:44:52.024408 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"082ad6d9291990cac9b146151e3fb67b42c516008d0e27be2a727f9b7d6fc2d5\": container with ID starting with 082ad6d9291990cac9b146151e3fb67b42c516008d0e27be2a727f9b7d6fc2d5 not found: ID does not exist" containerID="082ad6d9291990cac9b146151e3fb67b42c516008d0e27be2a727f9b7d6fc2d5" Dec 06 06:44:52 crc kubenswrapper[4706]: I1206 06:44:52.024457 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"082ad6d9291990cac9b146151e3fb67b42c516008d0e27be2a727f9b7d6fc2d5"} err="failed to get container status \"082ad6d9291990cac9b146151e3fb67b42c516008d0e27be2a727f9b7d6fc2d5\": rpc error: code = NotFound desc = could not find container \"082ad6d9291990cac9b146151e3fb67b42c516008d0e27be2a727f9b7d6fc2d5\": container with ID starting with 082ad6d9291990cac9b146151e3fb67b42c516008d0e27be2a727f9b7d6fc2d5 not found: ID does not exist" Dec 06 06:44:52 crc kubenswrapper[4706]: I1206 06:44:52.024493 4706 scope.go:117] "RemoveContainer" containerID="ded628f5b8aa349fee63ba2cdb4d341d6d5cfc9fab6bc5610793c141369cdfea" Dec 06 06:44:52 crc kubenswrapper[4706]: E1206 06:44:52.024970 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ded628f5b8aa349fee63ba2cdb4d341d6d5cfc9fab6bc5610793c141369cdfea\": container with ID starting with ded628f5b8aa349fee63ba2cdb4d341d6d5cfc9fab6bc5610793c141369cdfea not found: ID does not exist" containerID="ded628f5b8aa349fee63ba2cdb4d341d6d5cfc9fab6bc5610793c141369cdfea" Dec 06 06:44:52 crc kubenswrapper[4706]: I1206 06:44:52.025011 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ded628f5b8aa349fee63ba2cdb4d341d6d5cfc9fab6bc5610793c141369cdfea"} err="failed to get container status \"ded628f5b8aa349fee63ba2cdb4d341d6d5cfc9fab6bc5610793c141369cdfea\": rpc error: code = NotFound desc = could not find container 
\"ded628f5b8aa349fee63ba2cdb4d341d6d5cfc9fab6bc5610793c141369cdfea\": container with ID starting with ded628f5b8aa349fee63ba2cdb4d341d6d5cfc9fab6bc5610793c141369cdfea not found: ID does not exist" Dec 06 06:44:52 crc kubenswrapper[4706]: I1206 06:44:52.025036 4706 scope.go:117] "RemoveContainer" containerID="cb4d2d9a6480b2db0f405a80b47bb004e80adbe38eb01cd23b7a4b402128f083" Dec 06 06:44:52 crc kubenswrapper[4706]: E1206 06:44:52.025403 4706 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cb4d2d9a6480b2db0f405a80b47bb004e80adbe38eb01cd23b7a4b402128f083\": container with ID starting with cb4d2d9a6480b2db0f405a80b47bb004e80adbe38eb01cd23b7a4b402128f083 not found: ID does not exist" containerID="cb4d2d9a6480b2db0f405a80b47bb004e80adbe38eb01cd23b7a4b402128f083" Dec 06 06:44:52 crc kubenswrapper[4706]: I1206 06:44:52.025443 4706 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cb4d2d9a6480b2db0f405a80b47bb004e80adbe38eb01cd23b7a4b402128f083"} err="failed to get container status \"cb4d2d9a6480b2db0f405a80b47bb004e80adbe38eb01cd23b7a4b402128f083\": rpc error: code = NotFound desc = could not find container \"cb4d2d9a6480b2db0f405a80b47bb004e80adbe38eb01cd23b7a4b402128f083\": container with ID starting with cb4d2d9a6480b2db0f405a80b47bb004e80adbe38eb01cd23b7a4b402128f083 not found: ID does not exist" Dec 06 06:44:54 crc kubenswrapper[4706]: I1206 06:44:54.541139 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3a9ece9-3ffa-4da0-b14e-3a5206434ab7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e3a9ece9-3ffa-4da0-b14e-3a5206434ab7" (UID: "e3a9ece9-3ffa-4da0-b14e-3a5206434ab7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Dec 06 06:44:54 crc kubenswrapper[4706]: I1206 06:44:54.563107 4706 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3a9ece9-3ffa-4da0-b14e-3a5206434ab7-catalog-content\") on node \"crc\" DevicePath \"\"" Dec 06 06:44:54 crc kubenswrapper[4706]: I1206 06:44:54.678923 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-stshk"] Dec 06 06:44:54 crc kubenswrapper[4706]: I1206 06:44:54.690498 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-stshk"] Dec 06 06:44:56 crc kubenswrapper[4706]: I1206 06:44:56.047959 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3a9ece9-3ffa-4da0-b14e-3a5206434ab7" path="/var/lib/kubelet/pods/e3a9ece9-3ffa-4da0-b14e-3a5206434ab7/volumes" Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.145289 4706 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416725-jlwv9"] Dec 06 06:45:00 crc kubenswrapper[4706]: E1206 06:45:00.146170 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3a9ece9-3ffa-4da0-b14e-3a5206434ab7" containerName="registry-server" Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.146186 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3a9ece9-3ffa-4da0-b14e-3a5206434ab7" containerName="registry-server" Dec 06 06:45:00 crc kubenswrapper[4706]: E1206 06:45:00.146205 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3a9ece9-3ffa-4da0-b14e-3a5206434ab7" containerName="extract-content" Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.146214 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3a9ece9-3ffa-4da0-b14e-3a5206434ab7" containerName="extract-content" Dec 06 06:45:00 crc kubenswrapper[4706]: E1206 06:45:00.146234 4706 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3a9ece9-3ffa-4da0-b14e-3a5206434ab7" containerName="extract-utilities" Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.146298 4706 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3a9ece9-3ffa-4da0-b14e-3a5206434ab7" containerName="extract-utilities" Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.146516 4706 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3a9ece9-3ffa-4da0-b14e-3a5206434ab7" containerName="registry-server" Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.147330 4706 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416725-jlwv9" Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.149125 4706 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.150013 4706 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.156967 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416725-jlwv9"] Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.278847 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f0b13110-8c31-4674-b5e7-76b33b76a210-secret-volume\") pod \"collect-profiles-29416725-jlwv9\" (UID: \"f0b13110-8c31-4674-b5e7-76b33b76a210\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416725-jlwv9" Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.278897 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f0b13110-8c31-4674-b5e7-76b33b76a210-config-volume\") pod \"collect-profiles-29416725-jlwv9\" (UID: \"f0b13110-8c31-4674-b5e7-76b33b76a210\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416725-jlwv9" Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.279117 4706 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-95cb9\" (UniqueName: \"kubernetes.io/projected/f0b13110-8c31-4674-b5e7-76b33b76a210-kube-api-access-95cb9\") pod \"collect-profiles-29416725-jlwv9\" (UID: \"f0b13110-8c31-4674-b5e7-76b33b76a210\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416725-jlwv9" Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.381018 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f0b13110-8c31-4674-b5e7-76b33b76a210-secret-volume\") pod \"collect-profiles-29416725-jlwv9\" (UID: \"f0b13110-8c31-4674-b5e7-76b33b76a210\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416725-jlwv9" Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.381444 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f0b13110-8c31-4674-b5e7-76b33b76a210-config-volume\") pod \"collect-profiles-29416725-jlwv9\" (UID: \"f0b13110-8c31-4674-b5e7-76b33b76a210\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416725-jlwv9" Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.381601 4706 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-95cb9\" (UniqueName: \"kubernetes.io/projected/f0b13110-8c31-4674-b5e7-76b33b76a210-kube-api-access-95cb9\") pod \"collect-profiles-29416725-jlwv9\" (UID: \"f0b13110-8c31-4674-b5e7-76b33b76a210\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416725-jlwv9" Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.382298 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f0b13110-8c31-4674-b5e7-76b33b76a210-config-volume\") pod 
\"collect-profiles-29416725-jlwv9\" (UID: \"f0b13110-8c31-4674-b5e7-76b33b76a210\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416725-jlwv9" Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.398795 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f0b13110-8c31-4674-b5e7-76b33b76a210-secret-volume\") pod \"collect-profiles-29416725-jlwv9\" (UID: \"f0b13110-8c31-4674-b5e7-76b33b76a210\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416725-jlwv9" Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.400035 4706 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-95cb9\" (UniqueName: \"kubernetes.io/projected/f0b13110-8c31-4674-b5e7-76b33b76a210-kube-api-access-95cb9\") pod \"collect-profiles-29416725-jlwv9\" (UID: \"f0b13110-8c31-4674-b5e7-76b33b76a210\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29416725-jlwv9" Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.473296 4706 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416725-jlwv9" Dec 06 06:45:00 crc kubenswrapper[4706]: I1206 06:45:00.948647 4706 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416725-jlwv9"] Dec 06 06:45:01 crc kubenswrapper[4706]: I1206 06:45:01.009586 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416725-jlwv9" event={"ID":"f0b13110-8c31-4674-b5e7-76b33b76a210","Type":"ContainerStarted","Data":"64bf73572ab07496436a4d63f6f10b919948c6857b8fce9c479c7f6b732fec38"} Dec 06 06:45:02 crc kubenswrapper[4706]: I1206 06:45:02.033862 4706 generic.go:334] "Generic (PLEG): container finished" podID="f0b13110-8c31-4674-b5e7-76b33b76a210" containerID="1ac687e956521cf006670829411bebe2f2534ea413045ef7af2db853e53ac8b2" exitCode=0 Dec 06 06:45:02 crc kubenswrapper[4706]: I1206 06:45:02.034114 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416725-jlwv9" event={"ID":"f0b13110-8c31-4674-b5e7-76b33b76a210","Type":"ContainerDied","Data":"1ac687e956521cf006670829411bebe2f2534ea413045ef7af2db853e53ac8b2"} Dec 06 06:45:03 crc kubenswrapper[4706]: I1206 06:45:03.435593 4706 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416725-jlwv9" Dec 06 06:45:03 crc kubenswrapper[4706]: I1206 06:45:03.450981 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f0b13110-8c31-4674-b5e7-76b33b76a210-config-volume\") pod \"f0b13110-8c31-4674-b5e7-76b33b76a210\" (UID: \"f0b13110-8c31-4674-b5e7-76b33b76a210\") " Dec 06 06:45:03 crc kubenswrapper[4706]: I1206 06:45:03.451052 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-95cb9\" (UniqueName: \"kubernetes.io/projected/f0b13110-8c31-4674-b5e7-76b33b76a210-kube-api-access-95cb9\") pod \"f0b13110-8c31-4674-b5e7-76b33b76a210\" (UID: \"f0b13110-8c31-4674-b5e7-76b33b76a210\") " Dec 06 06:45:03 crc kubenswrapper[4706]: I1206 06:45:03.451158 4706 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f0b13110-8c31-4674-b5e7-76b33b76a210-secret-volume\") pod \"f0b13110-8c31-4674-b5e7-76b33b76a210\" (UID: \"f0b13110-8c31-4674-b5e7-76b33b76a210\") " Dec 06 06:45:03 crc kubenswrapper[4706]: I1206 06:45:03.454652 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0b13110-8c31-4674-b5e7-76b33b76a210-config-volume" (OuterVolumeSpecName: "config-volume") pod "f0b13110-8c31-4674-b5e7-76b33b76a210" (UID: "f0b13110-8c31-4674-b5e7-76b33b76a210"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Dec 06 06:45:03 crc kubenswrapper[4706]: I1206 06:45:03.462264 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0b13110-8c31-4674-b5e7-76b33b76a210-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f0b13110-8c31-4674-b5e7-76b33b76a210" (UID: "f0b13110-8c31-4674-b5e7-76b33b76a210"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Dec 06 06:45:03 crc kubenswrapper[4706]: I1206 06:45:03.463426 4706 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0b13110-8c31-4674-b5e7-76b33b76a210-kube-api-access-95cb9" (OuterVolumeSpecName: "kube-api-access-95cb9") pod "f0b13110-8c31-4674-b5e7-76b33b76a210" (UID: "f0b13110-8c31-4674-b5e7-76b33b76a210"). InnerVolumeSpecName "kube-api-access-95cb9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Dec 06 06:45:03 crc kubenswrapper[4706]: I1206 06:45:03.554550 4706 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-95cb9\" (UniqueName: \"kubernetes.io/projected/f0b13110-8c31-4674-b5e7-76b33b76a210-kube-api-access-95cb9\") on node \"crc\" DevicePath \"\"" Dec 06 06:45:03 crc kubenswrapper[4706]: I1206 06:45:03.554611 4706 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f0b13110-8c31-4674-b5e7-76b33b76a210-secret-volume\") on node \"crc\" DevicePath \"\"" Dec 06 06:45:03 crc kubenswrapper[4706]: I1206 06:45:03.554622 4706 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f0b13110-8c31-4674-b5e7-76b33b76a210-config-volume\") on node \"crc\" DevicePath \"\"" Dec 06 06:45:04 crc kubenswrapper[4706]: I1206 06:45:04.066753 4706 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29416725-jlwv9" event={"ID":"f0b13110-8c31-4674-b5e7-76b33b76a210","Type":"ContainerDied","Data":"64bf73572ab07496436a4d63f6f10b919948c6857b8fce9c479c7f6b732fec38"} Dec 06 06:45:04 crc kubenswrapper[4706]: I1206 06:45:04.066802 4706 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64bf73572ab07496436a4d63f6f10b919948c6857b8fce9c479c7f6b732fec38" Dec 06 06:45:04 crc kubenswrapper[4706]: I1206 06:45:04.066810 4706 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29416725-jlwv9" Dec 06 06:45:04 crc kubenswrapper[4706]: I1206 06:45:04.506692 4706 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv"] Dec 06 06:45:04 crc kubenswrapper[4706]: I1206 06:45:04.514051 4706 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29416680-r67cv"] Dec 06 06:45:06 crc kubenswrapper[4706]: I1206 06:45:06.047535 4706 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c32a94c6-6869-41d4-bc7a-c2aa66ba68ad" path="/var/lib/kubelet/pods/c32a94c6-6869-41d4-bc7a-c2aa66ba68ad/volumes" Dec 06 06:45:13 crc kubenswrapper[4706]: I1206 06:45:13.284198 4706 scope.go:117] "RemoveContainer" containerID="ea5d1edebf731a92a660007b0d6e70a5086d41553d9871268b397ca23cfd2df5" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515114750446024454 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015114750447017372 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015114736016016511 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015114736017015462 5ustar corecore